};

#ifdef AMD64
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
}

#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
                                                      T exchange_value, \
                                                      atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
  }

DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)

#undef DEFINE_STUB_XCHG
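// For reference (a clarifying note, not part of the original file):
// DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func) above expands to
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
//                                                T exchange_value,
//                                                atomic_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
//   }
//
// i.e. each instantiation specializes PlatformXchg for one operand size and
// forwards to the os:: stub routine whose operand width matches.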
#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
  template<>                                                               \
  template<typename T>                                                     \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value,  \
                                                         atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)

#undef DEFINE_STUB_CMPXCHG

#else // !AMD64

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;                     // save the increment
    lock xadd dword ptr [edx], eax;   // eax = old *dest; *dest += add_value
    add eax, ecx;                     // return the new value in eax
  }
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];        // implicitly locked; old value lands in eax
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl   // compare al with [edx]; if equal, store cl
  }
}
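// --- Illustrative sketch, not part of the original file ---
// For readers unfamiliar with the x86 idioms above: the semantics each
// specialization must provide can be expressed in portable C++11 atomics.
// This self-contained sketch mirrors the three operations; all names below
// are invented for illustration and are not HotSpot APIs.

#include <atomic>
#include <cstdint>

// add_and_fetch: atomically add, then return the *new* value -- what the
// "lock xadd" + "add eax, ecx" sequence computes.
inline int32_t sketch_add_and_fetch(std::atomic<int32_t>& dest, int32_t add_value) {
  return dest.fetch_add(add_value) + add_value;
}

// xchg: atomically publish a new value and return the *old* one, like
// "xchg eax, dword ptr [ecx]".
inline int32_t sketch_xchg(std::atomic<int32_t>& dest, int32_t exchange_value) {
  return dest.exchange(exchange_value);
}

// cmpxchg: install exchange_value only if the current value equals
// compare_value; either way, return the value that was observed -- the
// contract of "lock cmpxchg".
inline int8_t sketch_cmpxchg(std::atomic<int8_t>& dest, int8_t exchange_value,
                             int8_t compare_value) {
  int8_t observed = compare_value;
  dest.compare_exchange_strong(observed, exchange_value);
  return observed;  // == compare_value on success; the conflicting value otherwise
}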