--- old/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	2019-11-21 11:56:57.186761161 +0100
+++ new/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	2019-11-21 11:56:56.762753997 +0100
@@ -57,33 +57,33 @@
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
-  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
-  return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
+  return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
 }
 
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
   template<>                                                            \
   template<typename T>                                                  \
-  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
-                                                      T volatile* dest, \
+  inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
+                                                      T exchange_value, \
                                                       atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
-    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
+    return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
   }
 
 DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
@@ -91,15 +91,15 @@
 
 #undef DEFINE_STUB_XCHG
 
-#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
-  template<>                                              \
-  template<typename T>                                    \
-  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
-                                                         T volatile* dest, \
-                                                         T compare_value, \
+#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
+  template<>                                                               \
+  template<typename T>                                                     \
+  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
+                                                         T compare_value,  \
+                                                         T exchange_value, \
                                                          atomic_memory_order order) const { \
-    STATIC_ASSERT(ByteSize == sizeof(T)); \
-    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
+    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
+    return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
   }
 
 DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
@@ -111,8 +111,8 @@
 #else // !AMD64
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
@@ -127,8 +127,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedExchange
@@ -141,9 +141,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   // alternative for InterlockedCompareExchange
@@ -157,9 +157,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedCompareExchange
@@ -173,9 +173,9 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   int32_t ex_lo = (int32_t)exchange_value;
@@ -213,8 +213,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   volatile T* src = &store_value;
   __asm {
@@ -234,7 +234,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov al, v;
@@ -247,7 +247,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
      mov ax, v;
@@ -260,7 +260,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
      mov edx, p;
      mov eax, v;
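
The patch above harmonizes the parameter order of HotSpot's Atomic API so that the destination pointer always comes first, mirroring the assignment "*dest = value". Below is a minimal call-site sketch under that convention; the `_counter` variable and `example()` function are hypothetical names, and the snippet assumes a HotSpot translation unit that can include runtime/atomic.hpp:

    #include "runtime/atomic.hpp"

    static volatile int32_t _counter = 0;  // hypothetical field for illustration

    void example() {
      // Old parameter order (before this patch): value first, destination last.
      //   Atomic::add(1, &_counter);
      //   Atomic::cmpxchg(6, &_counter, 5);

      // New parameter order: destination first.
      Atomic::store(&_counter, 0);
      Atomic::add(&_counter, 1);                   // add-and-fetch: returns the new value
      int32_t prev = Atomic::xchg(&_counter, 5);   // returns the previous value
      int32_t seen = Atomic::cmpxchg(&_counter, 5, 6);  // compare 5, exchange 6
      // cmpxchg returns the value found at dest; it equals the compare
      // value (5) exactly when the exchange succeeded.
    }

The release stores implemented by the PlatformOrderedStore specializations follow the same convention, so a call such as Atomic::release_store(&_counter, 0) also takes the destination first after this series.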