--- old/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	2017-07-17 10:39:42.974135782 +0200
+++ new/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	2017-07-17 10:39:42.818135787 +0200
@@ -159,15 +159,10 @@
 }
 #endif // ARM
 
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-  *dest = store_value;
-}
+#ifdef _LP64
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template <>
+inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) {
 #ifdef ARM
   return arm_add_and_fetch(dest, add_value);
 #else
@@ -179,47 +174,40 @@
 #endif // ARM
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+template <>
+inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) {
 #ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
+  return arm_lock_test_and_set(dest, exchange_value);
 #else
 #ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
+  return m68k_lock_test_and_set(dest, exchange_value);
 #else
-  return __sync_add_and_fetch(dest, add_value);
+  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+  __sync_synchronize();
+  return result;
 #endif // M68K
 #endif // ARM
 }
 
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
+#endif // _LP64
 
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
+template <>
+inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
 }
 
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+template <>
+inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) {
 #ifdef ARM
   return arm_lock_test_and_set(dest, exchange_value);
 #else
@@ -230,7 +218,7 @@
   // operation.  Note that some platforms only support this with the
   // limitation that the only valid value to store is the immediate
   // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  int32_t result = __sync_lock_test_and_set (dest, exchange_value);
   // All atomic operations are expected to be full memory barriers
   // (see atomic.hpp). However, __sync_lock_test_and_set is not
   // a full memory barrier, but an acquire barrier. Hence, this added
@@ -241,30 +229,9 @@
 #endif // ARM
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
 
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
+template <>
+inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
 #ifdef ARM
   return arm_compare_and_swap(dest, compare_value, exchange_value);
 #else
@@ -276,52 +243,24 @@
 #endif // ARM
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
+
+template <>
+inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
 }
 
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
 
-inline jlong Atomic::load(const volatile jlong* src) {
-  volatile jlong dest;
+template<>
+inline int64_t Atomic::specialized_load(const volatile int64_t* src) {
+  volatile int64_t dest;
   os::atomic_copy64(src, &dest);
   return dest;
 }
 
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
+
+template<>
+inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) {
+  os::atomic_copy64((volatile int64_t*)&store_value, dest);
 }
 
 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP