--- old/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	2019-11-21 11:56:48.302611057 +0100
+++ new/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	2019-11-21 11:56:47.874603824 +0100
@@ -31,13 +31,13 @@
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -51,8 +51,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
@@ -64,9 +64,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
@@ -78,9 +78,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgl %1,(%3)"
@@ -92,8 +92,8 @@
 
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
@@ -107,8 +107,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
@@ -120,9 +120,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
@@ -142,12 +142,12 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
 }
 
 template<>
@@ -161,8 +161,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -173,7 +173,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template<typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgb (%2),%0"
                       : "=q" (v)
                       : "0" (v), "r" (p)
@@ -185,7 +185,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template<typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgw (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -197,7 +197,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template<typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgl (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -210,7 +210,7 @@
 struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
 {
   template<typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgq (%2), %0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
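
For reference, the effect of this reordering is that every platform primitive now takes the destination pointer first, and callers go through the shared Atomic front end with that same dest-first order. The sketch below is illustrative only and is not part of the patch; it assumes the shared front end in share/runtime/atomic.hpp exposes Atomic::store, Atomic::add, Atomic::xchg and Atomic::cmpxchg in the dest-first order this series establishes, the field name is made up, and it only compiles inside the HotSpot source tree.

  #include "runtime/atomic.hpp"

  static volatile int _counter = 0;            // illustrative field, not from the patch

  void example() {
    Atomic::store(&_counter, 0);               // dest first, then the value to store
    Atomic::add(&_counter, 1);                 // dest first, then the addend; returns the updated value
    int prev = Atomic::xchg(&_counter, 42);    // dest first, then the new value; returns the old value
    // dest, compare_value, exchange_value: writes exchange_value only if *dest
    // still equals compare_value, and returns the value that was observed.
    Atomic::cmpxchg(&_counter, 42, prev);
  }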