--- old/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	2019-11-21 11:56:49.662634036 +0100
+++ new/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	2019-11-21 11:56:49.166625655 +0100
@@ -163,22 +163,22 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
 #ifdef ARM
-  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
+  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
 #else
 #ifdef M68K
-  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
+  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
@@ -186,8 +186,8 @@
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
@@ -197,15 +197,15 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
+  return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
 #else
 #ifdef M68K
-  return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
+  return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
 #else
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
   // operation.  Note that some platforms only support this with the
@@ -224,8 +224,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T result = __sync_lock_test_and_set (dest, exchange_value);
@@ -239,16 +239,16 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
 #else
 #ifdef M68K
-  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
 #else
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 #endif // M68K
@@ -257,9 +257,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -276,8 +276,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
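
Illustrative sketch (not part of the patch above): the change moves the destination
pointer to the first parameter across add/xchg/cmpxchg/store, which matches the
argument order of the GCC __sync builtins the Zero port falls back to. A minimal
standalone program under that assumption; the free-standing helper names below are
hypothetical, not HotSpot's, and omit the memory-order plumbing:

// order_sketch.cpp -- compile with: g++ order_sketch.cpp && ./a.out
#include <cassert>
#include <cstdint>

// dest-first, as in the patched add_and_fetch.
template<typename D, typename I>
D add_and_fetch(D volatile* dest, I add_value) {
  return __sync_add_and_fetch(dest, add_value);
}

// dest-first, as in the patched PlatformXchg. __sync_lock_test_and_set
// is only an acquire barrier, hence the trailing full fence.
template<typename T>
T xchg(T volatile* dest, T exchange_value) {
  T result = __sync_lock_test_and_set(dest, exchange_value);
  __sync_synchronize();
  return result;
}

// dest, compare, exchange -- the patched PlatformCmpxchg order, which is
// also exactly the builtin's own order.
template<typename T>
T cmpxchg(T volatile* dest, T compare_value, T exchange_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

int main() {
  volatile int32_t v = 0;
  assert(add_and_fetch(&v, 4) == 4);  // v: 0 -> 4, returns new value
  assert(xchg(&v, 7) == 4);           // v: 4 -> 7, returns old value
  assert(cmpxchg(&v, 7, 9) == 7);     // v: 7 -> 9, returns observed value
  assert(v == 9);
  return 0;
}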