src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp

*** 55,107 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
  };
  
  #ifdef AMD64
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
!                                                atomic_memory_order order) const {
!   return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
  }
  
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
!                                                atomic_memory_order order) const {
!   return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
  }
  
  #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
    template<>                                                            \
    template<typename T>                                                  \
!   inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
!                                                       T volatile* dest, \
                                                        atomic_memory_order order) const { \
      STATIC_ASSERT(ByteSize == sizeof(T));                               \
!     return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
    }
  
  DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
  DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
  
  #undef DEFINE_STUB_XCHG
  
  #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
    template<>                                                               \
    template<typename T>                                                     \
!   inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
!                                                          T volatile* dest, \
                                                           T compare_value,  \
                                                           atomic_memory_order order) const { \
      STATIC_ASSERT(ByteSize == sizeof(T));                                  \
!     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
    }
  
  DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
  DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
  DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
--- 55,107 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename D, typename I>
!   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
  };
  
  #ifdef AMD64
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
!                                                atomic_memory_order order) const {
!   return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
  }
  
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
!                                                atomic_memory_order order) const {
!   return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
  }
  
  #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
    template<>                                                            \
    template<typename T>                                                  \
!   inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
!                                                       T exchange_value, \
                                                        atomic_memory_order order) const { \
      STATIC_ASSERT(ByteSize == sizeof(T));                               \
!     return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
    }
  
  DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
  DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
  
  #undef DEFINE_STUB_XCHG
  
  #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
    template<>                                                               \
    template<typename T>                                                     \
!   inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
                                                           T compare_value,  \
+                                                          T exchange_value, \
                                                           atomic_memory_order order) const { \
      STATIC_ASSERT(ByteSize == sizeof(T));                                  \
!     return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
    }
  
  DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
  DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
  DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
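
To make the reordered shape concrete: hand-expanding DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func) with the new parameter order yields the following. This is an illustrative expansion of the macro above, not code that appears in the patch.

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // Delegates to the VM stub: dest first, then compare, then exchange.
  return cmpxchg_using_helper<int32_t>(os::atomic_cmpxchg_func, dest,
                                       compare_value, exchange_value);
}
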
*** 109,120 ****
  #undef DEFINE_STUB_CMPXCHG
  
  #else // !AMD64
  
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    __asm {
      mov edx, dest;
--- 109,120 ----
  #undef DEFINE_STUB_CMPXCHG
  
  #else // !AMD64
  
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    __asm {
      mov edx, dest;
*** 125,136 ****
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    // alternative for InterlockedExchange
    __asm {
      mov eax, exchange_value;
--- 125,136 ----
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
!                                              T exchange_value,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    // alternative for InterlockedExchange
    __asm {
      mov eax, exchange_value;
*** 139,151 ****
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(1 == sizeof(T));
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
--- 139,151 ----
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(1 == sizeof(T));
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
*** 155,167 ****
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
--- 155,167 ----
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
*** 171,183 ****
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    int32_t ex_lo  = (int32_t)exchange_value;
    int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
    int32_t cmp_lo = (int32_t)compare_value;
--- 171,183 ----
    }
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    int32_t ex_lo  = (int32_t)exchange_value;
    int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
    int32_t cmp_lo = (int32_t)compare_value;
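
A side note on this hunk: the 32-bit cmpxchg<8> path splits both 64-bit operands into 32-bit halves (ex_lo/ex_hi, cmp_lo, ...) because 32-bit x86 has no 64-bit general register, and the halves are presumably fed to a lock cmpxchg8b in the asm that follows. A standalone sketch of the split, assuming a little-endian x86 target (this snippet is not part of the patch):

#include <assert.h>
#include <stdint.h>

int main() {
  int64_t exchange_value = 0x1122334455667788LL;
  // The cast keeps the low 32 bits; on a little-endian target the
  // adjacent int32_t in memory is the high half.
  int32_t ex_lo = (int32_t)exchange_value;
  int32_t ex_hi = *( ((int32_t*)&exchange_value) + 1 );
  assert(ex_lo == 0x55667788);
  assert(ex_hi == 0x11223344);
  return 0;
}
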
*** 211,222 ****
    return dest;
  }
  
  template<>
  template<typename T>
! inline void Atomic::PlatformStore<8>::operator()(T store_value,
!                                                  T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    volatile T* src = &store_value;
    __asm {
      mov eax, src
      fild     qword ptr [eax]
--- 211,222 ----
    return dest;
  }
  
  template<>
  template<typename T>
! inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
!                                                  T store_value) const {
    STATIC_ASSERT(8 == sizeof(T));
    volatile T* src = &store_value;
    __asm {
      mov eax, src
      fild     qword ptr [eax]
*** 232,242 ****
  #ifndef AMD64
  template<>
  struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const {
      __asm {
        mov edx, p;
        mov al, v;
        xchg al, byte ptr [edx];
      }
--- 232,242 ----
  #ifndef AMD64
  template<>
  struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const {
      __asm {
        mov edx, p;
        mov al, v;
        xchg al, byte ptr [edx];
      }
*** 245,255 ****
  
  template<>
  struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const {
      __asm {
        mov edx, p;
        mov ax, v;
        xchg ax, word ptr [edx];
      }
--- 245,255 ----
  
  template<>
  struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const {
      __asm {
        mov edx, p;
        mov ax, v;
        xchg ax, word ptr [edx];
      }
*** 258,268 ****
  
  template<>
  struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const {
      __asm {
        mov edx, p;
        mov eax, v;
        xchg eax, dword ptr [edx];
      }
--- 258,268 ----
  
  template<>
  struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const {
      __asm {
        mov edx, p;
        mov eax, v;
        xchg eax, dword ptr [edx];
      }
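
Taken together, every specialization in this file now takes the destination pointer first. As a minimal sketch of how Atomic call sites read under the new order — the Atomic::add/xchg/cmpxchg front ends live in shared code, not in this diff, so their exact signatures are assumed here:

volatile int32_t counter = 0;

int32_t updated = Atomic::add(&counter, 1);          // dest, addend; returns the new value
int32_t old     = Atomic::xchg(&counter, 42);        // dest, new value; returns the old value
int32_t seen    = Atomic::cmpxchg(&counter, 42, 7);  // dest, compare, exchange; returns the value witnessed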