254 return result;
255 #endif // M68K
256 #endif // ARM
257 }
258
259 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
260 return (void *) xchg_ptr((intptr_t) exchange_value,
261 (volatile intptr_t*) dest);
262 }
263
// No direct support for cmpxchg of bytes; emulate using int.
// The 1-byte specialization inherits CmpxchgByteUsingInt, which
// (per its name and the comment above) performs the byte CAS via a
// wider integer compare-and-swap on the containing word.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
267
268 template<>
269 template<typename T>
270 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
271 T volatile* dest,
272 T compare_value,
273 cmpxchg_memory_order order) {
274 #ifdef ARM
275 return cmpxchg_using_stub<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
276 #else
277 #ifdef M68K
278 return cmpxchg_using_stub<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
279 #else
280 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
281 #endif // M68K
282 #endif // ARM
283 }
284
285 template<>
286 template<typename T>
287 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
288 T volatile* dest,
289 T compare_value,
290 cmpxchg_memory_order order) {
291 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
292 }
293
294 inline jlong Atomic::load(const volatile jlong* src) {
295 volatile jlong dest;
296 os::atomic_copy64(src, &dest);
297 return dest;
298 }
299
300 inline void Atomic::store(jlong store_value, jlong* dest) {
301 os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
302 }
303
304 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
305 os::atomic_copy64((volatile jlong*)&store_value, dest);
306 }
307
308 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
|
254 return result;
255 #endif // M68K
256 #endif // ARM
257 }
258
259 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
260 return (void *) xchg_ptr((intptr_t) exchange_value,
261 (volatile intptr_t*) dest);
262 }
263
// No direct support for cmpxchg of bytes; emulate using int.
// The 1-byte specialization inherits CmpxchgByteUsingInt, which
// (per its name and the comment above) performs the byte CAS via a
// wider integer compare-and-swap on the containing word.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
267
268 template<>
269 template<typename T>
270 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
271 T volatile* dest,
272 T compare_value,
273 cmpxchg_memory_order order) {
274 STATIC_ASSERT(4 == sizeof(T));
275 #ifdef ARM
276 return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
277 #else
278 #ifdef M68K
279 return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
280 #else
281 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
282 #endif // M68K
283 #endif // ARM
284 }
285
286 template<>
287 template<typename T>
288 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
289 T volatile* dest,
290 T compare_value,
291 cmpxchg_memory_order order) {
292 STATIC_ASSERT(8 == sizeof(T));
293 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
294 }
295
296 inline jlong Atomic::load(const volatile jlong* src) {
297 volatile jlong dest;
298 os::atomic_copy64(src, &dest);
299 return dest;
300 }
301
302 inline void Atomic::store(jlong store_value, jlong* dest) {
303 os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
304 }
305
306 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
307 os::atomic_copy64((volatile jlong*)&store_value, dest);
308 }
309
310 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
|