
src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp

rev 48027 : 8192123: Zero should use compiler built-ins for atomics on linux-arm

*** 28,98 ****
  #include "runtime/os.hpp"

  // Implementation of class atomic

- #ifdef ARM
-
- /*
-  * __kernel_cmpxchg
-  *
-  * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
-  * Return zero if *ptr was changed or non-zero if no exchange happened.
-  * The C flag is also set if *ptr was changed to allow for assembly
-  * optimization in the calling code.
-  *
-  */
-
- typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
- #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
- /* Perform an atomic compare and swap: if the current value of `*PTR'
-    is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
-    `*PTR' before the operation.*/
- static inline int arm_compare_and_swap(int newval,
-                                        volatile int *ptr,
-                                        int oldval) {
-   for (;;) {
-     int prev = *ptr;
-     if (prev != oldval)
-       return prev;
-
-     if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-       // Success.
-       return prev;
-
-     // We failed even though prev == oldval. Try again.
-   }
- }
-
- /* Atomically add an int to memory. */
- static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
-   for (;;) {
-     // Loop until a __kernel_cmpxchg succeeds.
-
-     int prev = *ptr;
-
-     if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-       return prev + add_value;
-   }
- }
-
- /* Atomically write VALUE into `*PTR' and returns the previous
-    contents of `*PTR'. */
- static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
-   for (;;) {
-     // Loop until a __kernel_cmpxchg succeeds.
-     int prev = *ptr;
-
-     if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-       return prev;
-   }
- }
- #endif // ARM
-
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
--- 28,37 ----
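For context on the removed helpers: arm_add_and_fetch retries because __kernel_cmpxchg can fail whenever *ptr changed between the read and the kernel call. The same retry pattern can be written portably with a compiler built-in standing in for the kernel helper; the sketch below is illustrative only (demo_add_and_fetch is a hypothetical name, not part of this patch) and assumes a GCC-compatible compiler.

static inline int demo_add_and_fetch(int add_value, volatile int* ptr) {
  for (;;) {
    int prev = *ptr;  // sample the current value
    // __sync_bool_compare_and_swap returns true iff *ptr still held prev
    // and was atomically replaced with prev + add_value.
    if (__sync_bool_compare_and_swap(ptr, prev, prev + add_value))
      return prev + add_value;
    // Lost a race with another updater; resample and try again.
  }
}

In practice the patch drops the loop entirely, since __sync_add_and_fetch performs the whole read-modify-write atomically in one call.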
*** 103,136 ****
  template<typename I, typename D>
  inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));

- #ifdef ARM
-   return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
- #else
    return __sync_add_and_fetch(dest, add_value);
- #endif // ARM
  }

  template<>
  template<typename I, typename D>
  inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
-
    return __sync_add_and_fetch(dest, add_value);
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                               T volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(T));
- #ifdef ARM
-   return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
- #else
    // __sync_lock_test_and_set is a bizarrely named atomic exchange
    // operation. Note that some platforms only support this with the
    // limitation that the only valid value to store is the immediate
    // constant 1. There is a test for this in JNI_CreateJavaVM().
    T result = __sync_lock_test_and_set (dest, exchange_value);
--- 42,67 ----
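The retained comment above notes that some platforms only allow __sync_lock_test_and_set to store the immediate constant 1, and that JNI_CreateJavaVM() contains a test for this. A standalone smoke test in the same spirit might look like the following; this is a hedged sketch, not the actual HotSpot test, and assumes a GCC-compatible compiler.

#include <cassert>

int main() {
  volatile int cell = 0xcafe;
  // Atomic exchange: store 0xbeef, return the previous contents.
  int old = __sync_lock_test_and_set(&cell, 0xbeef);
  __sync_synchronize();  // see the barrier discussion in the next hunk
  assert(old == 0xcafe);
  assert(cell == 0xbeef);
  return 0;
}

On a platform limited to storing the constant 1, the second assert would not hold for an arbitrary value like 0xbeef.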
*** 138,148 ****
    // (see atomic.hpp). However, __sync_lock_test_and_set is not
    // a full memory barrier, but an acquire barrier. Hence, this added
    // barrier.
    __sync_synchronize();
    return result;
- #endif // ARM
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
--- 69,78 ----
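The surviving code pairs the exchange with an explicit fence because __sync_lock_test_and_set is only an acquire barrier, while Atomic::xchg (per atomic.hpp) must act as a full memory barrier. Distilled to a minimal sketch (demo_full_fence_xchg is a hypothetical name, not part of this patch):

template<typename T>
inline T demo_full_fence_xchg(T exchange_value, T volatile* dest) {
  T result = __sync_lock_test_and_set(dest, exchange_value);  // acquire only
  __sync_synchronize();  // upgrade to the full barrier Atomic::xchg requires
  return result;
}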
*** 162,176 ****
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
                                                  cmpxchg_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
- #ifdef ARM
-   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
- #else
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
- #endif // ARM
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
--- 92,102 ----
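__sync_val_compare_and_swap is a full-barrier compare-and-swap that returns the previous contents of *dest, matching the contract of the removed arm_compare_and_swap helper. As a usage illustration only (demo_lock and demo_unlock are hypothetical names, not part of this patch), a minimal spinlock over the same built-in:

static volatile int lock_word = 0;  // 0 = free, 1 = held

static inline void demo_lock() {
  // The return value is the previous contents; 0 means we took 0 -> 1.
  while (__sync_val_compare_and_swap(&lock_word, 0, 1) != 0) {
    // Spin until the holder releases.
  }
}

static inline void demo_unlock() {
  __sync_lock_release(&lock_word);  // stores 0 with release semantics
}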