--- old/src/hotspot/share/runtime/atomic.hpp	2019-11-21 11:53:17.531054357 +0100
+++ new/src/hotspot/share/runtime/atomic.hpp	2019-11-21 11:53:17.275050175 +0100
@@ -100,8 +100,8 @@
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
-  template<typename I, typename D>
-  inline static D add(I add_value, D volatile* dest,
+  template<typename D, typename I>
+  inline static D add(D volatile* dest, I add_value,
                       atomic_memory_order order = memory_order_conservative);
 
   template<typename I, typename D>
@@ -224,7 +224,7 @@
   // Dispatch handler for add.  Provides type-based validity checking
   // and limited conversions around calls to the platform-specific
   // implementation layer provided by PlatformAdd.
-  template<typename I, typename D, typename Enable = void>
+  template<typename D, typename I, typename Enable = void>
   struct AddImpl;
 
   // Platform-specific implementation of add.  Support for sizes of 4
@@ -239,7 +239,7 @@
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
   // Then
-  //   platform_add(add_value, dest)
+  //   platform_add(dest, add_value)
   // must be a valid expression, returning a result convertible to D.
   //
   // No definition is provided; all platforms must explicitly define
@@ -259,12 +259,12 @@
   // otherwise, addend is add_value.
   //
   // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(addend, dest)
+  //   fetch_and_add(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // old value.
   //
   // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(addend, dest)
+  //   add_and_fetch(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // new value.
   //
@@ -286,8 +286,8 @@
   // function.  No scaling of add_value is performed when D is a pointer
   // type, so this function can be used to implement the support function
   // required by AddAndFetch.
-  template<typename Type, typename Fn, typename I, typename D>
-  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+  template<typename Type, typename Fn, typename D, typename I>
+  static D add_using_helper(Fn fn, D volatile* dest, I add_value);
 
   // Dispatch handler for cmpxchg.  Provides type-based validity
   // checking and limited conversions around calls to the
@@ -517,21 +517,21 @@
 
 template<typename Derived>
 struct Atomic::FetchAndAdd {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename Derived>
 struct Atomic::AddAndFetch {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
-  Atomic::add(I(1), dest, order);
+  Atomic::add(dest, I(1), order);
 }
 
 template<typename D>
@@ -540,7 +540,7 @@
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
   // Assumes two's complement integer representation.
 #pragma warning(suppress: 4146)
-  Atomic::add(I(-1), dest, order);
+  Atomic::add(dest, I(-1), order);
 }
 
 template<typename I, typename D>
@@ -557,7 +557,7 @@
   AddendType addend = sub_value;
   // Assumes two's complement integer representation.
 #pragma warning(suppress: 4146) // In case AddendType is not signed.
-  return Atomic::add(-addend, dest, order);
+  return Atomic::add(dest, -addend, order);
 }
 
 // Define the class before including platform file, which may specialize
@@ -678,68 +678,68 @@
   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 }
 
-template<typename I, typename D>
-inline D Atomic::add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::add(D volatile* dest, I add_value,
                      atomic_memory_order order) {
-  return AddImpl<I, D>()(add_value, dest, order);
+  return AddImpl<D, I>()(dest, add_value, order);
 }
 
-template<typename I, typename D>
+template<typename D, typename I>
 struct Atomic::AddImpl<
-  I, D,
+  D, I,
   typename EnableIf<IsIntegral<I>::value &&
                     IsIntegral<D>::value &&
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
    D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(addend, dest, order);
+    return PlatformAdd<sizeof(D)>()(dest, addend, order);
   }
 };
 
-template<typename I, typename P>
+template<typename P, typename I>
 struct Atomic::AddImpl<
-  I, P*,
+  P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
+  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
     typedef typename Conditional<IsSigned<I>::value,
                                  intptr_t,
                                  uintptr_t>::type CI;
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
+    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
   }
 };
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   I addend = add_value;
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     addend *= sizeof(typename RemovePointer<D>::type);
   }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
+  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
   return old + add_value;
 }
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     add_value *= sizeof(typename RemovePointer<D>::type);
   }
-  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
+  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
}
 
-template<typename Type, typename Fn, typename I, typename D>
-inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+template<typename Type, typename Fn, typename D, typename I>
+inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
   return PrimitiveConversions::cast<D>(
     fn(PrimitiveConversions::cast<Type>(add_value),
        reinterpret_cast<Type volatile*>(dest)));
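
Reviewer note, not part of the patch: the change is a pure parameter
reordering -- destination first, then the value to add -- bringing
Atomic::add in line with the rest of the Atomic API. Below is a minimal
standalone sketch of the FetchAndAdd CRTP protocol with the new
(dest, add_value) order. It is illustrative only: MyPlatformAdd, the
omission of the memory-order argument, and the use of the GCC/Clang
__atomic_fetch_add builtin are assumptions for the sketch, not HotSpot code.

  // Standalone sketch, not HotSpot code: models how the FetchAndAdd base
  // turns a derived class's fetch_and_add(dest, addend) -- which returns
  // the OLD value -- into Atomic::add's updated-value result.
  #include <cstdio>

  template<typename Derived>
  struct FetchAndAdd {
    template<typename D, typename I>
    D operator()(D volatile* dest, I add_value) const {
      // New order: destination first, addend second.
      D old = static_cast<const Derived*>(this)->fetch_and_add(dest, add_value);
      return old + add_value;  // convert old-value result to updated value
    }
  };

  // Hypothetical platform layer; real platforms specialize PlatformAdd.
  struct MyPlatformAdd : FetchAndAdd<MyPlatformAdd> {
    template<typename D, typename I>
    D fetch_and_add(D volatile* dest, I add_value) const {
      return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
    }
  };

  int main() {
    volatile int counter = 0;
    int updated = MyPlatformAdd()(&counter, 5);  // dest, then add_value
    std::printf("updated = %d\n", updated);      // prints 5
    return 0;
  }

Call sites update mechanically, e.g. Atomic::add(1, &_counter) becomes
Atomic::add(&_counter, 1) (illustrative name); the
memory_order_conservative default is unchanged.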