--- old/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-07-14 18:05:35.438232873 +0200
+++ new/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-07-14 18:05:35.290232878 +0200
@@ -44,40 +44,26 @@
  * kernel source or kernel_user_helpers.txt in Linux Doc.
  */

-inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
-inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
-inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
-inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
-inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load (const volatile jlong* src) {
-  assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned");
+template <>
+inline void Atomic::specialized_store(int64_t value, volatile int64_t* dest) {
+  assert(((intx)dest & (sizeof(int64_t)-1)) == 0, "Atomic 64 bit store mis-aligned");
 #ifdef AARCH64
-  return *src;
+  *dest = value;
 #else
-  return (*os::atomic_load_long_func)(src);
+  (*os::atomic_store_long_func)(value, dest);
 #endif
 }

-inline void Atomic::store (jlong value, volatile jlong* dest) {
-  assert(((intx)dest & (sizeof(jlong)-1)) == 0, "Atomic store jlong mis-aligned");
+template <>
+inline int64_t Atomic::specialized_load(const volatile int64_t* src) {
+  assert(((intx)src & (sizeof(int64_t)-1)) == 0, "Atomic 64 bit load mis-aligned");
 #ifdef AARCH64
-  *dest = value;
+  return *src;
 #else
-  (*os::atomic_store_long_func)(value, dest);
+  return (*os::atomic_load_long_func)(src);
 #endif
 }

-inline void Atomic::store (jlong value, jlong* dest) {
-  store(value, (volatile jlong*)dest);
-}
-
 // As per atomic.hpp all read-modify-write operations have to provide two-way
 // barriers semantics. For AARCH64 we are using load-acquire-with-reservation and
 // store-release-with-reservation. While load-acquire combined with store-release
@@ -91,9 +77,10 @@
 //
 // For ARMv7 we add explicit barriers in the stubs.

-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template <>
+inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) {
 #ifdef AARCH64
-  jint val;
+  int32_t val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -110,57 +97,10 @@
 #endif
 }

-inline void Atomic::inc(volatile jint* dest) {
-  Atomic::add(1, (volatile jint *)dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  Atomic::add(-1, (volatile jint *)dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef AARCH64
-  intptr_t val;
-  int tmp;
-  __asm__ volatile(
-    "1:\n\t"
-    " ldaxr %[val], [%[dest]]\n\t"
-    " add %[val], %[val], %[add_val]\n\t"
-    " stlxr %w[tmp], %[val], [%[dest]]\n\t"
-    " cbnz %w[tmp], 1b\n\t"
-    : [val] "=&r" (val), [tmp] "=&r" (tmp)
-    : [add_val] "r" (add_value), [dest] "r" (dest)
-    : "memory");
-  return val;
-#else
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-#endif
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  Atomic::add_ptr(1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  Atomic::add_ptr(-1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+template <>
+inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) {
 #ifdef AARCH64
-  jint old_val;
+  int32_t old_val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -176,33 +116,12 @@
 #endif
 }

-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-#ifdef AARCH64
-  intptr_t old_val;
-  int tmp;
-  __asm__ volatile(
-    "1:\n\t"
-    " ldaxr %[old_val], [%[dest]]\n\t"
-    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
-    " cbnz %w[tmp], 1b\n\t"
-    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
-    : [new_val] "r" (exchange_value), [dest] "r" (dest)
-    : "memory");
-  return old_val;
-#else
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-#endif
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
 // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+template <>
+inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
 #ifdef AARCH64
-  jint rv;
+  int32_t rv;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -225,7 +144,8 @@
 #endif
 }

-inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+template <>
+inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
 #ifdef AARCH64
   jlong rv;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -245,21 +165,43 @@
     : "memory");
   return rv;
 #else
-  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
+  assert(VM_Version::supports_cx8(), "64 bit atomic compare and exchange not supported on this architecture!");
   return (*os::atomic_cmpxchg_long_func)(compare_value,
                                          exchange_value, dest);
 #endif
 }

-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
 #ifdef AARCH64
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-#else
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-#endif
+template <>
+inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) {
+  intptr_t val;
+  int tmp;
+  __asm__ volatile(
+    "1:\n\t"
+    " ldaxr %[val], [%[dest]]\n\t"
+    " add %[val], %[val], %[add_val]\n\t"
+    " stlxr %w[tmp], %[val], [%[dest]]\n\t"
+    " cbnz %w[tmp], 1b\n\t"
+    : [val] "=&r" (val), [tmp] "=&r" (tmp)
+    : [add_val] "r" (add_value), [dest] "r" (dest)
+    : "memory");
+  return val;
 }

-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+template <>
+inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) {
+  intptr_t old_val;
+  int tmp;
+  __asm__ volatile(
+    "1:\n\t"
+    " ldaxr %[old_val], [%[dest]]\n\t"
+    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
+    " cbnz %w[tmp], 1b\n\t"
+    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
+    : [new_val] "r" (exchange_value), [dest] "r" (dest)
+    : "memory");
+  return old_val;
 }
+#endif
+
 #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
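
Note (illustration only, not part of the patch): the change above assumes a shared Atomic front end that forwards each operation to a per-width template specialization such as specialized_add. The self-contained sketch below mirrors that dispatch pattern; the class body and the portable GCC/Clang __atomic_add_fetch stand-in are hypothetical, replacing the LDAXR/STLXR loops and os::atomic_*_func kernel helpers that the real linux_arm specializations in this diff rely on.

// atomic_dispatch_sketch.cpp -- illustration only, not HotSpot code.
// Mirrors the pattern assumed by the patch: a generic Atomic front end
// forwards to per-width template specializations supplied elsewhere
// (in HotSpot, by the os_cpu headers such as atomic_linux_arm.hpp).
#include <cstdint>
#include <iostream>

class Atomic {
 public:
  // Primary template: declared centrally, defined per platform/width.
  template <typename T>
  static T specialized_add(T add_value, volatile T* dest);

  // Shared front end that callers use; it forwards to the specialization.
  template <typename T>
  static T add(T add_value, volatile T* dest) {
    return specialized_add(add_value, dest);
  }
};

// Portable stand-in for a 32-bit platform specialization (the real
// linux_arm one uses an LDAXR/STLXR retry loop on AARCH64 or a kernel
// helper on ARMv7, as shown in the diff above).
template <>
inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  // __atomic_add_fetch returns the new value, matching Atomic::add semantics.
  return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
}

int main() {
  volatile int32_t counter = 41;
  std::cout << Atomic::add<int32_t>(1, &counter) << std::endl;  // prints 42
}

Built with a recent g++ or clang++ (for example, g++ -std=c++11 atomic_dispatch_sketch.cpp), the sketch should print 42.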