 }
 
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
   template<>                                                            \
   template<typename T>                                                  \
   inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
                                                       T exchange_value, \
                                                       atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
     return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
   }
 
 DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
 DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
 
 #undef DEFINE_STUB_XCHG
 
 #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
   template<>                                                            \
   template<typename T>                                                  \
-  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
-                                                         T volatile* dest, \
-                                                         T compare_value, \
+  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
+                                                         T compare_value, \
+                                                         T exchange_value, \
                                                          atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
-    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
+    return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
   }
 
 DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
 DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
 DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
 
 #undef DEFINE_STUB_CMPXCHG
 
 #else // !AMD64
 
 template<>
 template<typename D, typename I>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   __asm {
     mov edx, dest;
     mov eax, add_value;
     mov ecx, eax;
     lock xadd dword ptr [edx], eax;
     add eax, ecx;
   }
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                              T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedExchange
   __asm {
     mov eax, exchange_value;
     mov ecx, dest;
     xchg eax, dword ptr [ecx];
   }
 }
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
-                                                T compare_value,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
+                                                T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest
     mov cl, exchange_value
     mov al, compare_value
     lock cmpxchg byte ptr [edx], cl
   }
 }
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
-                                                T compare_value,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
+                                                T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest
     mov ecx, exchange_value
     mov eax, compare_value
     lock cmpxchg dword ptr [edx], ecx
   }
 }
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
-                                                T compare_value,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
+                                                T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   int32_t ex_lo  = (int32_t)exchange_value;
   int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
   int32_t cmp_lo = (int32_t)compare_value;
   int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
   __asm {
     push ebx
     push edi
     mov eax, cmp_lo
     mov edx, cmp_hi
     mov edi, dest
     mov ebx, ex_lo
     mov ecx, ex_hi
     lock cmpxchg8b qword ptr [edi]
     pop edi
     pop ebx
   }
 }
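// A minimal, self-contained sketch, not HotSpot code: the mimic_* names are
// invented here, and std::atomic with memory_order_seq_cst stands in for the
// platform asm and OS stubs above. It illustrates the two return-value
// conventions these specializations rely on:
//   - add_and_fetch returns the *updated* value, which is why the 32-bit asm
//     follows `lock xadd` (old value left in eax) with `add eax, ecx`;
//   - cmpxchg returns the value observed at *dest, matching `lock cmpxchg`,
//     which leaves the prior memory value in eax whether or not the swap
//     happened. The parameter order shown is the new one from the diff:
//     (dest, compare_value, exchange_value).

#include <atomic>
#include <cassert>
#include <cstdint>

int32_t mimic_add_and_fetch(std::atomic<int32_t>* dest, int32_t add_value) {
  // fetch_add returns the old value; adding add_value again reproduces the
  // asm's trick of following `lock xadd` (old value in eax) with `add eax, ecx`.
  return dest->fetch_add(add_value, std::memory_order_seq_cst) + add_value;
}

int32_t mimic_cmpxchg(std::atomic<int32_t>* dest,
                      int32_t compare_value,
                      int32_t exchange_value) {
  int32_t observed = compare_value;
  // On failure, compare_exchange_strong writes the current memory value into
  // `observed`; on success it leaves it equal to compare_value, i.e. the old
  // memory value. Either way, returning it matches the eax convention.
  dest->compare_exchange_strong(observed, exchange_value,
                                std::memory_order_seq_cst);
  return observed;
}

int main() {
  std::atomic<int32_t> v{40};
  assert(mimic_add_and_fetch(&v, 2) == 42);  // updated value comes back
  assert(mimic_cmpxchg(&v, 42, 7) == 42);    // swap succeeded: old value
  assert(mimic_cmpxchg(&v, 42, 9) == 7);     // swap failed: current value
  assert(v.load() == 7);
  return 0;
}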