--- old/src/hotspot/share/runtime/atomic.hpp	2019-11-21 11:58:25.212247935 +0100
+++ new/src/hotspot/share/runtime/atomic.hpp	2019-11-21 11:58:24.708239425 +0100
@@ -79,13 +79,13 @@
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static void store(T store_value, volatile D* dest);
+  template<typename D, typename T>
+  inline static void store(volatile D* dest, T store_value);
 
-  template<typename T, typename D>
+  template<typename D, typename T>
   inline static void release_store(volatile D* dest, T store_value);
 
-  template<typename T, typename D>
+  template<typename D, typename T>
   inline static void release_store_fence(volatile D* dest, T store_value);
 
   // Atomically load from a location
@@ -100,12 +100,12 @@
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
-  template<typename I, typename D>
-  inline static D add(I add_value, D volatile* dest,
+  template<typename D, typename I>
+  inline static D add(D volatile* dest, I add_value,
                       atomic_memory_order order = memory_order_conservative);
 
-  template<typename I, typename D>
-  inline static D sub(I sub_value, D volatile* dest,
+  template<typename D, typename I>
+  inline static D sub(D volatile* dest, I sub_value,
                       atomic_memory_order order = memory_order_conservative);
 
   // Atomically increment location. inc() provide:
@@ -132,8 +132,8 @@
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static D xchg(T exchange_value, volatile D* dest,
+  template<typename D, typename T>
+  inline static D xchg(volatile D* dest, T exchange_value,
                        atomic_memory_order order = memory_order_conservative);
 
   // Performs atomic compare of *dest and compare_value, and exchanges
@@ -141,10 +141,10 @@
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-  template<typename T, typename D, typename U>
-  inline static D cmpxchg(T exchange_value,
-                          D volatile* dest,
+  template<typename D, typename U, typename T>
+  inline static D cmpxchg(D volatile* dest,
                           U compare_value,
+                          T exchange_value,
                           atomic_memory_order order = memory_order_conservative);
 
   // Performs atomic compare of *dest and NULL, and replaces *dest
@@ -152,8 +152,8 @@
   // the comparison succeeded and the exchange occurred.  This is
   // often used as part of lazy initialization, as a lock-free
   // alternative to the Double-Checked Locking Pattern.
-  template<typename T, typename D>
-  inline static bool replace_if_null(T* value, D* volatile* dest,
+  template<typename D, typename T>
+  inline static bool replace_if_null(D* volatile* dest, T* value,
                                      atomic_memory_order order = memory_order_conservative);
 
 private:
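The effect of the reordering is easiest to see at a call site. The following sketch is illustrative only; it is not part of the patch, and `_count`, `_head`, `old_head`, and `new_head` are hypothetical names:

    // Old argument order: new value first, destination last.
    //   Atomic::add(1, &_count);
    //   Atomic::cmpxchg(new_head, &_head, old_head);
    // New argument order: destination first, so a call reads like
    // "*dest op= value", consistent with Atomic::load/store.
    Atomic::store(&_count, 0);
    Atomic::add(&_count, 1);
    Atomic::cmpxchg(&_head, old_head, new_head);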
@@ -168,7 +168,7 @@
   // Dispatch handler for store.  Provides type-based validity
   // checking and limited conversions around calls to the platform-
   // specific implementation layer provided by PlatformOp.
-  template<typename T, typename D, typename PlatformOp, typename Enable = void>
+  template<typename D, typename T, typename PlatformOp, typename Enable = void>
   struct StoreImpl;
 
   // Platform-specific implementation of store.  Support for sizes
@@ -224,7 +224,7 @@
   // Dispatch handler for add.  Provides type-based validity checking
   // and limited conversions around calls to the platform-specific
   // implementation layer provided by PlatformAdd.
-  template<typename I, typename D, typename Enable = void>
+  template<typename D, typename I, typename Enable = void>
   struct AddImpl;
 
   // Platform-specific implementation of add.  Support for sizes of 4
@@ -239,7 +239,7 @@
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
   // Then
-  //   platform_add(add_value, dest)
+  //   platform_add(dest, add_value)
   // must be a valid expression, returning a result convertible to D.
   //
   // No definition is provided; all platforms must explicitly define
@@ -259,12 +259,12 @@
   // otherwise, addend is add_value.
   //
   // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(addend, dest)
+  //   fetch_and_add(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // old value.
   //
   // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(addend, dest)
+  //   add_and_fetch(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // new value.
   //
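The two mixins differ only in which value the underlying primitive reports back: the old value (FetchAndAdd) or the new one (AddAndFetch). As a rough sketch of a platform hooking into FetchAndAdd under the new order, loosely modeled on what a GCC-based port might do (integral case only, fixed memory order; real platform files use their own intrinsics or assembly and map `order` to barriers):

    template<size_t byte_size>
    struct Atomic::PlatformAdd
      : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
    {
      template<typename D, typename I>
      D fetch_and_add(D volatile* dest, I add_value,
                      atomic_memory_order order) const {
        // GCC-style builtin: atomically adds and returns the *old* value.
        // FetchAndAdd::operator() then adds add_value once more to produce
        // the updated value that Atomic::add returns to its caller.
        return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
      }
    };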
@@ -286,14 +286,14 @@
   // function.  No scaling of add_value is performed when D is a pointer
   // type, so this function can be used to implement the support function
   // required by AddAndFetch.
-  template<typename Fn, typename I, typename D>
-  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+  template<typename Fn, typename D, typename I>
+  static D add_using_helper(Fn fn, D volatile* dest, I add_value);
 
   // Dispatch handler for cmpxchg.  Provides type-based validity
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
   // PlatformCmpxchg.
-  template<typename T, typename D, typename U, typename Enable = void>
+  template<typename D, typename U, typename T, typename Enable = void>
   struct CmpxchgImpl;
 
   // Platform-specific implementation of cmpxchg.  Support for sizes
@@ -306,11 +306,11 @@
   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
   //
   // Then
-  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
+  //   platform_cmpxchg(dest, compare_value, exchange_value, order)
   // must be a valid expression, returning a result convertible to T.
   //
   // A default definition is provided, which declares a function template
-  //   T operator()(T, T volatile*, T, atomic_memory_order) const
+  //   T operator()(T volatile*, T, T, atomic_memory_order) const
   //
   // For each required size, a platform must either provide an
   // appropriate definition of that function, or must entirely
@@ -326,9 +326,9 @@
   // helper function.
   template<typename Type, typename Fn, typename T>
   static T cmpxchg_using_helper(Fn fn,
-                                T exchange_value,
                                 T volatile* dest,
-                                T compare_value);
+                                T compare_value,
+                                T exchange_value);
 
   // Support platforms that do not provide Read-Modify-Write
   // byte-level atomic access.  To use, derive PlatformCmpxchg<1> from
@@ -341,7 +341,7 @@
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
   // PlatformXchg.
-  template<typename T, typename D, typename Enable = void>
+  template<typename D, typename T, typename Enable = void>
   struct XchgImpl;
 
   // Platform-specific implementation of xchg.  Support for sizes
@@ -353,11 +353,11 @@
   // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
   //
   // Then
-  //   platform_xchg(exchange_value, dest)
+  //   platform_xchg(dest, exchange_value)
   // must be a valid expression, returning a result convertible to T.
   //
   // A default definition is provided, which declares a function template
-  //   T operator()(T, T volatile*, atomic_memory_order) const
+  //   T operator()(T volatile*, T, atomic_memory_order) const
   //
   // For each required size, a platform must either provide an
   // appropriate definition of that function, or must entirely
@@ -373,8 +373,8 @@
   // helper function.
   template<typename Type, typename Fn, typename T>
   static T xchg_using_helper(Fn fn,
-                             T exchange_value,
-                             T volatile* dest);
+                             T volatile* dest,
+                             T exchange_value);
 };
 
 template<typename From, typename To>
@@ -450,9 +450,9 @@
   PlatformOp,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     // Forward to the platform handler for the size of T.
-    PlatformOp()(new_value, dest);
+    PlatformOp()(dest, new_value);
   }
 };
 
@@ -461,16 +461,16 @@
 // The new_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // new_value in the destination.
-template<typename T, typename D, typename PlatformOp>
+template<typename D, typename T, typename PlatformOp>
 struct Atomic::StoreImpl<
-  T*, D*,
+  D*, T*,
   PlatformOp,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  void operator()(T* new_value, D* volatile* dest) const {
+  void operator()(D* volatile* dest, T* new_value) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* value = new_value;
-    PlatformOp()(value, dest);
+    PlatformOp()(dest, value);
   }
 };
 
@@ -486,12 +486,12 @@
   PlatformOp,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
-    PlatformOp()(Translator::decay(new_value),
-                 reinterpret_cast<Decayed volatile*>(dest));
+    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
+                 Translator::decay(new_value));
   }
 };
 
@@ -504,8 +504,8 @@
 template<size_t byte_size>
 struct Atomic::PlatformStore {
   template<typename T>
-  void operator()(T new_value,
-                  T volatile* dest) const {
+  void operator()(T volatile* dest,
+                  T new_value) const {
     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
     (void)const_cast<T&>(*dest = new_value);
   }
@@ -517,21 +517,21 @@
 template<typename Derived>
 struct Atomic::FetchAndAdd {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename Derived>
 struct Atomic::AddAndFetch {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
-  Atomic::add(I(1), dest, order);
+  Atomic::add(dest, I(1), order);
 }
 
 template<typename D>
@@ -540,11 +540,11 @@
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
   // Assumes two's complement integer representation.
 #pragma warning(suppress: 4146)
-  Atomic::add(I(-1), dest, order);
+  Atomic::add(dest, I(-1), order);
 }
 
-template<typename I, typename D>
-inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
+template<typename D, typename I>
+inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   STATIC_ASSERT(IsIntegral<I>::value);
   // If D is a pointer type, use [u]intptr_t as the addend type,
@@ -557,7 +557,7 @@
   AddendType addend = sub_value;
   // Assumes two's complement integer representation.
 #pragma warning(suppress: 4146) // In case AddendType is not signed.
-  return Atomic::add(-addend, dest, order);
+  return Atomic::add(dest, -addend, order);
 }
 
 // Define the class before including platform file, which may specialize
@@ -568,9 +568,9 @@
 template<size_t byte_size>
 struct Atomic::PlatformCmpxchg {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
                T compare_value,
+               T exchange_value,
                atomic_memory_order order) const;
 };
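For concreteness, a size-4 specialization satisfying this contract in the new order could be sketched with a GCC-style builtin as follows. This is illustrative only, not quoted from any platform file; a real port maps `order` onto its own barriers instead of hard-coding __ATOMIC_SEQ_CST:

    template<>
    template<typename T>
    inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                    T compare_value,
                                                    T exchange_value,
                                                    atomic_memory_order order) const {
      STATIC_ASSERT(4 == sizeof(T));
      T expected = compare_value;
      __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                  false /* weak */,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return expected;  // prior value of *dest, per the cmpxchg contract
    }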
@@ -579,9 +579,9 @@
 // in this file, near the other definitions related to cmpxchg.
 struct Atomic::CmpxchgByteUsingInt {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
                T compare_value,
+               T exchange_value,
                atomic_memory_order order) const;
 };
 
@@ -593,8 +593,8 @@
 template<size_t byte_size>
 struct Atomic::PlatformXchg {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
+               T exchange_value,
                atomic_memory_order order) const;
 };
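A matching PlatformXchg sketch in the new order, with the same caveats (hypothetical specialization, fixed memory order for brevity):

    template<>
    template<typename T>
    inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                                 T exchange_value,
                                                 atomic_memory_order order) const {
      STATIC_ASSERT(4 == sizeof(T));
      // Atomically stores exchange_value and returns the previous *dest.
      return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
    }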
@@ -654,113 +654,113 @@
   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 }
 
-template<typename T, typename D>
-inline void Atomic::store(T store_value, volatile D* dest) {
-  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
+template<typename D, typename T>
+inline void Atomic::store(volatile D* dest, T store_value) {
+  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
 }
 
 template<size_t byte_size, ScopedFenceType type>
 struct Atomic::PlatformOrderedStore {
   template<typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     ScopedFence<type> f((void*)p);
-    Atomic::store(v, p);
+    Atomic::store(p, v);
   }
 };
 
-template<typename T, typename D>
+template<typename D, typename T>
 inline void Atomic::release_store(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 }
 
-template<typename T, typename D>
+template<typename D, typename T>
 inline void Atomic::release_store_fence(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 }
 
-template<typename I, typename D>
-inline D Atomic::add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::add(D volatile* dest, I add_value,
                      atomic_memory_order order) {
-  return AddImpl<I, D>()(add_value, dest, order);
+  return AddImpl<D, I>()(dest, add_value, order);
 }
 
-template<typename I, typename D>
+template<typename D, typename I>
 struct Atomic::AddImpl<
-  I, D,
+  D, I,
   typename EnableIf<IsIntegral<I>::value &&
                     IsIntegral<D>::value &&
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(addend, dest, order);
+    return PlatformAdd<sizeof(D)>()(dest, addend, order);
   }
 };
 
-template<typename I, typename P>
+template<typename P, typename I>
 struct Atomic::AddImpl<
-  I, P*,
+  P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
+  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
     typedef typename Conditional<IsSigned<I>::value,
                                  intptr_t,
                                  uintptr_t>::type CI;
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
+    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
   }
 };
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   I addend = add_value;
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     addend *= sizeof(typename RemovePointer<D>::type);
   }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
+  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
   return old + add_value;
 }
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     add_value *= sizeof(typename RemovePointer<D>::type);
   }
-  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
+  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
 }
 
-template<typename Fn, typename I, typename D>
-inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+template<typename Fn, typename D, typename I>
+inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
   return PrimitiveConversions::cast<D>(
     fn(PrimitiveConversions::cast<I>(add_value),
        reinterpret_cast<I volatile*>(dest)));
 }
 
-template<typename T, typename D, typename U>
-inline D Atomic::cmpxchg(T exchange_value,
-                         D volatile* dest,
+template<typename D, typename U, typename T>
+inline D Atomic::cmpxchg(D volatile* dest,
                          U compare_value,
+                         T exchange_value,
                          atomic_memory_order order) {
-  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
+  return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
 }
 
-template<typename T, typename D>
-inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
+template<typename D, typename T>
+inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
                                     atomic_memory_order order) {
   // Presently using a trivial implementation in terms of cmpxchg.
   // Consider adding platform support, to permit the use of compiler
   // intrinsics like gcc's __sync_bool_compare_and_swap.
   D* expected_null = NULL;
-  return expected_null == cmpxchg(value, dest, expected_null, order);
+  return expected_null == cmpxchg(dest, expected_null, value, order);
 }
 
 // Handle cmpxchg for integral and enum types.
@@ -771,12 +771,12 @@
   T, T, T,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, T compare_value,
+  T operator()(T volatile* dest, T compare_value, T exchange_value,
                atomic_memory_order order) const {
     // Forward to the platform handler for the size of T.
-    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
-                                        dest,
+    return PlatformCmpxchg<sizeof(T)>()(dest,
                                         compare_value,
+                                        exchange_value,
                                         order);
   }
 };
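As the comment in replace_if_null above suggests, a typical use is lock-free lazy initialization. A minimal usage sketch with hypothetical names (`Foo`, `_cache`, `get_cache`):

    static Foo* volatile _cache = NULL;

    Foo* get_cache() {
      Foo* c = Atomic::load(&_cache);
      if (c == NULL) {
        Foo* candidate = new Foo();
        if (Atomic::replace_if_null(&_cache, candidate)) {
          c = candidate;             // won the race; *dest was NULL
        } else {
          delete candidate;          // lost the race; reload the winner's value
          c = Atomic::load(&_cache);
        }
      }
      return c;
    }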
@@ -790,21 +790,21 @@
 // The exchange_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // exchange_value in the destination.
-template<typename T, typename D, typename U>
+template<typename D, typename U, typename T>
 struct Atomic::CmpxchgImpl<
-  T*, D*, U*,
+  D*, U*, T*,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                     IsSame<typename RemoveCV<D>::type,
                            typename RemoveCV<U>::type>::value>::type>
 {
-  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
+  D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
                 atomic_memory_order order) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* new_value = exchange_value;
     // Don't care what the CV qualifiers for compare_value are,
     // but we need to match D* when calling platform support.
     D* old_value = const_cast<D*>(compare_value);
-    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
+    return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
   }
 };
 
@@ -820,24 +820,24 @@
   T, T, T,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, T compare_value,
+  T operator()(T volatile* dest, T compare_value, T exchange_value,
                atomic_memory_order order) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
     return Translator::recover(
-      cmpxchg(Translator::decay(exchange_value),
-              reinterpret_cast<Decayed volatile*>(dest),
+      cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
               Translator::decay(compare_value),
+              Translator::decay(exchange_value),
              order));
   }
 };
 
 template<typename Type, typename Fn, typename T>
 inline T Atomic::cmpxchg_using_helper(Fn fn,
-                                      T exchange_value,
                                       T volatile* dest,
-                                      T compare_value) {
+                                      T compare_value,
+                                      T exchange_value) {
   STATIC_ASSERT(sizeof(Type) == sizeof(T));
   return PrimitiveConversions::cast<T>(
     fn(PrimitiveConversions::cast<Type>(exchange_value),
@@ -846,9 +846,9 @@
 
 template<typename T>
-inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
-                                                 T volatile* dest,
+inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
   STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
   uint8_t canon_exchange_value = exchange_value;
@@ -871,7 +871,7 @@
     // ... except for the one byte we want to update
     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 
-    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
+    uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
     if (res == cur) break;      // success
 
     // at least one byte in the int changed value, so update
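The loop above emulates a one-byte CAS with a four-byte CAS on the enclosing aligned word. A standalone sketch of the same idea, outside HotSpot (`byte_cas` and `cas32` are hypothetical names; cas32 stands in for the platform's four-byte cmpxchg, already in the new dest-first order):

    #include <stdint.h>
    #include <stddef.h>

    uint8_t byte_cas(volatile uint8_t* dest, uint8_t compare_value,
                     uint8_t exchange_value,
                     uint32_t (*cas32)(volatile uint32_t*, uint32_t, uint32_t)) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(dest);
      volatile uint32_t* aligned =
        reinterpret_cast<volatile uint32_t*>(addr & ~(uintptr_t)3);
      size_t offset = addr & 3;            // byte index within the aligned word
      uint32_t cur = *aligned;
      for (;;) {
        uint8_t cur_byte = reinterpret_cast<uint8_t*>(&cur)[offset];
        if (cur_byte != compare_value) {
          return cur_byte;                 // comparison failed; report old value
        }
        uint32_t new_word = cur;           // keep the other three bytes intact...
        reinterpret_cast<uint8_t*>(&new_word)[offset] = exchange_value;
        uint32_t res = cas32(aligned, cur, new_word);  // (dest, compare, exchange)
        if (res == cur) {
          return compare_value;            // success; old byte matched
        }
        cur = res;                         // some byte changed; retry on fresh word
      }
    }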
@@ -891,9 +891,9 @@
   T, T,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
+  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
     // Forward to the platform handler for the size of T.
-    return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
+    return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
   }
 };
 
@@ -902,15 +902,15 @@
 // The exchange_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // exchange_value in the destination.
-template<typename T, typename D>
+template<typename D, typename T>
 struct Atomic::XchgImpl<
-  T*, D*,
+  D*, T*,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
+  D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* new_value = exchange_value;
-    return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
+    return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
   }
 };
 
@@ -926,30 +926,31 @@
   T, T,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
+  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
     return Translator::recover(
-      xchg(Translator::decay(exchange_value),
-           reinterpret_cast<Decayed volatile*>(dest),
+      xchg(reinterpret_cast<Decayed volatile*>(dest),
+           Translator::decay(exchange_value),
            order));
   }
 };
 
 template<typename Type, typename Fn, typename T>
 inline T Atomic::xchg_using_helper(Fn fn,
-                                   T exchange_value,
-                                   T volatile* dest) {
+                                   T volatile* dest,
+                                   T exchange_value) {
   STATIC_ASSERT(sizeof(Type) == sizeof(T));
+  // Notice the swapped order of arguments. Change when/if stubs are rewritten.
   return PrimitiveConversions::cast<T>(
     fn(PrimitiveConversions::cast<Type>(exchange_value),
        reinterpret_cast<Type volatile*>(dest)));
 }
 
-template<typename T, typename D>
-inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
-  return XchgImpl<T, D>()(exchange_value, dest, order);
+template<typename D, typename T>
+inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
+  return XchgImpl<D, T>()(dest, exchange_value, order);
 }
 
 #endif // SHARE_RUNTIME_ATOMIC_HPP