  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
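    // __kernel_cmpxchg returns 0 only when it has atomically replaced
    // *ptr; any nonzero result means another thread changed *ptr between
    // our load and the exchange, so we reload and retry.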
    int prev = *ptr;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      return prev;
  }
}
#endif // ARM

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

#ifdef ARM
  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
#else
#ifdef M68K
  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  return __sync_add_and_fetch(dest, add_value);
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
#else
#ifdef M68K
  return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation. Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1. There is a test for this in JNI_CreateJavaVM().
  T result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp). However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier. Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
}

// No direct support for cmpxchg of bytes; emulate using int.
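// CmpxchgByteUsingInt (from share/runtime/atomic.hpp) implements the
// one-byte exchange by performing a four-byte cmpxchg on the aligned
// word containing the byte, retrying if the neighbouring bytes are
// concurrently modified.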
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
#else
#ifdef M68K
  return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

// 64-bit loads and stores are routed through os::atomic_copy64(), which
// provides an atomic 8-byte copy even on 32-bit Zero targets where plain
// 64-bit memory accesses are not guaranteed to be atomic.
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src),
                    reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value),
                    reinterpret_cast<volatile int64_t*>(dest));
}

#endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP
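// Illustrative sketch only (not part of this header): these Platform*
// specializations are not called directly; they are reached through the
// generic Atomic front end declared in share/runtime/atomic.hpp, e.g.
//
//   volatile int32_t counter = 0;
//   Atomic::add(&counter, 1);          // -> PlatformAdd<4>::add_and_fetch
//   Atomic::xchg(&counter, 42);        // -> PlatformXchg<4>::operator()
//   Atomic::cmpxchg(&counter, 42, 7);  // -> PlatformCmpxchg<4>::operator()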