# HG changeset patch
# User mdoerr
# Date 1524238504 -7200
#      Fri Apr 20 17:35:04 2018 +0200
# Node ID c7ac973d0ccfa8889851036d975807fa412a4fa6
# Parent  fa6f8bce6490d0d065514ba887557671d1ca0dea
8202080: Introduce ordering semantics for Atomic::add
Reviewed-by:

diff --git a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
@@ -77,56 +77,83 @@
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
+inline void pre_membar(cmpxchg_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_acquire: break;
+    case memory_order_release:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_lwsync); break;
+    default                  : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
+inline void post_membar(cmpxchg_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_release: break;
+    case memory_order_acquire:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_isync); break;
+    default                  : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
+
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
     "1: lwarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stwcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
     "1: ldarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stdcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
@@ -207,24 +234,6 @@
   return old_value;
 }
 
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@@ -251,7 +260,7 @@
 
   unsigned int old_value, value32;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
@@ -290,7 +299,7 @@
       "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return PrimitiveConversions::cast<T>((unsigned char)old_value);
 }
@@ -310,7 +319,7 @@
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
@@ -340,7 +349,7 @@
      "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
@@ -360,7 +369,7 @@
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
@@ -390,7 +399,7 @@
      "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
diff --git a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
@@ -73,12 +73,13 @@
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
@@ -106,6 +107,9 @@
       //---<  clobbered  >---
       : "cc", "r0", "r2", "r3", "memory"
     );
+    if (order == memory_order_conservative) {
+      __asm__ __volatile__ ("bcr 14, 0" : : : "memory");
+    }
   } else {
     __asm__ __volatile__ (
       "   LLGF     %[old],%[mem]       \n\t" // get old value
@@ -129,7 +133,8 @@
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
@@ -157,6 +162,9 @@
      //---<  clobbered  >---
      : "cc", "r0", "r2", "r3", "memory"
    );
+    if (order == memory_order_conservative) {
+      __asm__ __volatile__ ("bcr 14, 0" : : : "memory");
+    }
   } else {
     __asm__ __volatile__ (
       "   LG       %[old],%[mem]       \n\t" // get old value
diff --git a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
@@ -32,12 +32,13 @@
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest) const;
+  D fetch_and_add(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
@@ -92,7 +93,8 @@
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
@@ -105,8 +107,7 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
--- a/src/hotspot/share/runtime/atomic.hpp
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -38,7 +38,10 @@
 #include "utilities/macros.hpp"
 
 enum cmpxchg_memory_order {
-  memory_order_relaxed,
+  memory_order_relaxed = 0,
+  memory_order_acquire = 2,
+  memory_order_release = 3,
+  memory_order_acq_rel = 4,
   // Use value which doesn't interfere with C++2011. We need to be more conservative.
   memory_order_conservative = 8
 };
@@ -80,7 +83,8 @@
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
   template<typename I, typename D>
-  inline static D add(I add_value, D volatile* dest);
+  inline static D add(I add_value, D volatile* dest,
+                      cmpxchg_memory_order order = memory_order_acq_rel);
 
   template<typename I, typename D>
   inline static D sub(I sub_value, D volatile* dest);
@@ -488,13 +492,13 @@
 template<typename Derived>
 struct Atomic::FetchAndAdd {
   template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest) const;
+  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<typename Derived>
 struct Atomic::AddAndFetch {
   template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest) const;
+  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<typename D>
@@ -589,8 +593,9 @@
 }
 
 template<typename I, typename D>
-inline D Atomic::add(I add_value, D volatile* dest) {
-  return AddImpl<I, D>()(add_value, dest);
+inline D Atomic::add(I add_value, D volatile* dest,
+                     cmpxchg_memory_order order) {
+  return AddImpl<I, D>()(add_value, dest, order);
 }
 
 template<typename I, typename D>
@@ -601,9 +606,9 @@
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(I add_value, D volatile* dest) const {
+  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(addend, dest);
+    return PlatformAdd<sizeof(D)>()(addend, dest, order);
   }
 };
 
@@ -612,14 +617,14 @@
   I, P*,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(I add_value, P* volatile* dest) const {
+  P* operator()(I add_value, P* volatile* dest, cmpxchg_memory_order order) const {
     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
     typedef typename Conditional<IsSigned<I>::value,
                                  intptr_t,
                                  uintptr_t>::type CI;
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(addend, dest);
+    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
   }
 };
 
@@ -634,13 +639,13 @@
 // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 template<>
 struct Atomic::AddImpl<short, short> {
-  short operator()(short add_value, short volatile* dest) const {
+  short operator()(short add_value, short volatile* dest, cmpxchg_memory_order order) const {
 #ifdef VM_LITTLE_ENDIAN
     assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1), order);
 #else
     assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest), order);
 #endif
     return (short)(new_value >> 16); // preserves sign
   }
@@ -648,24 +653,26 @@
 
 template<typename Derived>
 template<typename I, typename D>
-inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
+inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
+                                                  cmpxchg_memory_order order) const {
   I addend = add_value;
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     addend *= sizeof(typename RemovePointer<D>::type);
   }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
+  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
   return old + add_value;
 }
 
 template<typename Derived>
 template<typename I, typename D>
-inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
+inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
+                                                  cmpxchg_memory_order order) const {
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     add_value *= sizeof(typename RemovePointer<D>::type);
   }
-  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
+  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
 }
 
 template<typename Type, typename Fn, typename I>
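
Usage sketch (not part of the changeset): the patch lets callers of Atomic::add pick an
ordering, defaulting to memory_order_acq_rel, which on PPC keeps the lwsync-before /
isync-after bracketing that the old unconditional code emitted; memory_order_relaxed drops
both barriers, and memory_order_conservative (and any other value) falls back to full sync
fences. The standalone C++11 program below mirrors the pre_membar()/post_membar() dispatch
with std::atomic_thread_fence as a rough stand-in for the PPC barriers. fence_add(), the
copied enum, and main() are illustrative only and are not HotSpot code.

#include <atomic>
#include <cstdio>

// Mirrors the enum added to share/runtime/atomic.hpp by the patch.
enum cmpxchg_memory_order {
  memory_order_relaxed      = 0,
  memory_order_acquire      = 2,
  memory_order_release      = 3,
  memory_order_acq_rel      = 4,
  memory_order_conservative = 8
};

// Barrier in front of the read-modify-write (lwsync/sync on PPC).
static void pre_membar(cmpxchg_memory_order order) {
  switch (order) {
    case memory_order_relaxed:
    case memory_order_acquire: break;  // no leading barrier
    case memory_order_release:
    case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_release); break;
    default:                   std::atomic_thread_fence(std::memory_order_seq_cst); break;
  }
}

// Barrier behind the read-modify-write (isync/sync on PPC).
static void post_membar(cmpxchg_memory_order order) {
  switch (order) {
    case memory_order_relaxed:
    case memory_order_release: break;  // no trailing barrier
    case memory_order_acquire:
    case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_acquire); break;
    default:                   std::atomic_thread_fence(std::memory_order_seq_cst); break;
  }
}

// Hypothetical add-and-fetch: a relaxed RMW bracketed by the selected barriers,
// analogous to what PlatformAdd<4>/<8>::add_and_fetch does after the patch.
static int fence_add(int add_value, std::atomic<int>* dest,
                     cmpxchg_memory_order order = memory_order_acq_rel) {
  pre_membar(order);
  int result = dest->fetch_add(add_value, std::memory_order_relaxed) + add_value;
  post_membar(order);
  return result;
}

int main() {
  std::atomic<int> counter(0);
  std::printf("%d\n", fence_add(1, &counter));                            // default: acq_rel
  std::printf("%d\n", fence_add(1, &counter, memory_order_relaxed));      // no barriers
  std::printf("%d\n", fence_add(1, &counter, memory_order_conservative)); // full fences
  return 0;
}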