253 __sync_synchronize();
254 return result;
255 #endif // M68K
256 #endif // ARM
257 }
258
259 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
260 return (void *) xchg_ptr((intptr_t) exchange_value,
261 (volatile intptr_t*) dest);
262 }
263
// No direct support for cmpxchg of bytes; emulate a 1-byte CAS using the
// int-sized CAS via the shared CmpxchgByteUsingInt helper.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
267
268 template<>
269 template<typename T>
270 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
271 T volatile* dest,
272 T compare_value,
273 cmpxchg_memory_order order) {
274 STATIC_ASSERT(4 == sizeof(T));
275 #ifdef ARM
276 return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
277 #else
278 #ifdef M68K
279 return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
280 #else
281 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
282 #endif // M68K
283 #endif // ARM
284 }
285
286 template<>
287 template<typename T>
288 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
289 T volatile* dest,
290 T compare_value,
291 cmpxchg_memory_order order) {
292 STATIC_ASSERT(8 == sizeof(T));
293 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
294 }
295
296 inline jlong Atomic::load(const volatile jlong* src) {
297 volatile jlong dest;
298 os::atomic_copy64(src, &dest);
299 return dest;
300 }
301
302 inline void Atomic::store(jlong store_value, jlong* dest) {
303 os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
304 }
305
306 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
307 os::atomic_copy64((volatile jlong*)&store_value, dest);
308 }
309
310 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
|
253 __sync_synchronize();
254 return result;
255 #endif // M68K
256 #endif // ARM
257 }
258
259 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
260 return (void *) xchg_ptr((intptr_t) exchange_value,
261 (volatile intptr_t*) dest);
262 }
263
// No direct support for cmpxchg of bytes; emulate a 1-byte CAS using the
// int-sized CAS via the shared CmpxchgByteUsingInt helper.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
267
268 template<>
269 template<typename T>
270 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
271 T volatile* dest,
272 T compare_value,
273 cmpxchg_memory_order order) const {
274 STATIC_ASSERT(4 == sizeof(T));
275 #ifdef ARM
276 return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
277 #else
278 #ifdef M68K
279 return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
280 #else
281 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
282 #endif // M68K
283 #endif // ARM
284 }
285
286 template<>
287 template<typename T>
288 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
289 T volatile* dest,
290 T compare_value,
291 cmpxchg_memory_order order) const {
292 STATIC_ASSERT(8 == sizeof(T));
293 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
294 }
295
296 inline jlong Atomic::load(const volatile jlong* src) {
297 volatile jlong dest;
298 os::atomic_copy64(src, &dest);
299 return dest;
300 }
301
302 inline void Atomic::store(jlong store_value, jlong* dest) {
303 os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
304 }
305
306 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
307 os::atomic_copy64((volatile jlong*)&store_value, dest);
308 }
309
310 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
|