--- old/src/share/vm/runtime/atomic.hpp	2017-08-16 01:19:14.378525136 -0400
+++ new/src/share/vm/runtime/atomic.hpp	2017-08-16 01:19:14.266519391 -0400
@@ -26,6 +26,11 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP

 #include "memory/allocation.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "metaprogramming/integerTypes.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isSame.hpp"
+#include "metaprogramming/removeCV.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"

@@ -111,13 +116,132 @@
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-  inline static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static unsigned int cmpxchg(unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order = memory_order_conservative);
+
+  template<typename T, typename D, typename U>
+  inline static D cmpxchg(T exchange_value,
+                          D volatile* dest,
+                          U compare_value,
+                          cmpxchg_memory_order order = memory_order_conservative);
+
+  // Performs atomic compare of *dest and NULL, and replaces *dest
+  // with exchange_value if the comparison succeeded. Returns true if
+  // the comparison succeeded and the exchange occurred. This is
+  // often used as part of lazy initialization, as a lock-free
+  // alternative to the Double-Checked Locking Pattern.
+  template<typename T, typename D>
+  inline static bool replace_if_null(T* value, D* volatile* dest,
+                                     cmpxchg_memory_order order = memory_order_conservative);
+
+  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
+                                     volatile intptr_t* dest,
+                                     intptr_t compare_value,
+                                     cmpxchg_memory_order order = memory_order_conservative) {
+    return cmpxchg(exchange_value, dest, compare_value, order);
+  }
+
+  inline static void* cmpxchg_ptr(void* exchange_value,
+                                  volatile void* dest,
+                                  void* compare_value,
+                                  cmpxchg_memory_order order = memory_order_conservative) {
+    return cmpxchg(exchange_value,
+                   reinterpret_cast<void* volatile*>(dest),
+                   compare_value,
+                   order);
+  }
+
+private:
+  // Test whether From is implicitly convertible to To.
+  // From and To must be pointer types.
+  // Note: Provides the limited subset of C++11 std::is_convertible
+  // that is needed here.
+  template<typename From, typename To> struct IsPointerConvertible;
+
+  // Dispatch handler for cmpxchg. Provides type-based validity
+  // checking and limited conversions around calls to the
+  // platform-specific implementation layer provided by
+  // PlatformCmpxchg.
+  template<typename T, typename D, typename U, typename Enable = void>
+  struct CmpxchgImpl;
+
+  // Platform-specific implementation of cmpxchg. Support for sizes
+  // of 1, 4, and 8 is required. The class is a function object that
+  // must be default constructible, with these requirements:
+  //
+  // - dest is of type T*.
+  // - exchange_value and compare_value are of type T.
+  // - order is of type cmpxchg_memory_order.
+  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
+  //
+  // Then
+  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
+  // must be a valid expression, returning a result convertible to T.
+  //
+  // A default definition is provided, which declares a function template
+  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
+  //
+  // For each required size, a platform must either provide an
+  // appropriate definition of that function, or must entirely
+  // specialize the class template for that size.
+  template<size_t byte_size> struct PlatformCmpxchg;
+
+  // Support for platforms that implement some variants of cmpxchg
+  // using a (typically out of line) non-template helper function.
+  // The generic arguments passed to PlatformCmpxchg need to be
+  // translated to the appropriate type for the helper function, the
+  // helper invoked on the translated arguments, and the result
+  // translated back. Type is the parameter / return type of the
+  // helper function.
+  template<typename Type, typename Fn, typename T>
+  static T cmpxchg_using_helper(Fn fn,
+                                T exchange_value,
+                                T volatile* dest,
+                                T compare_value);
+
+  // Support platforms that do not provide Read-Modify-Write
+  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
+  // this class.
+public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
+  struct CmpxchgByteUsingInt;
+private:
+};
+
+template<typename From, typename To>
+struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
+  // Determine whether From* is implicitly convertible to To*, using
+  // the "sizeof trick".
+  typedef char yes;
+  typedef char (&no)[2];
+
+  static yes test(To*);
+  static no test(...);
+  static From* test_value;
+
+  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
+};
+
+// Define the class before including platform file, which may specialize
+// the operator definition. No generic definition of specializations
+// of the operator template is provided, nor are there any generic
+// specializations of the class. The platform file is responsible for
+// providing those.
+template<size_t byte_size>
+struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
+  template<typename T>
+  T operator()(T exchange_value,
+               T volatile* dest,
+               T compare_value,
+               cmpxchg_memory_order order) const;
+};
+
+// Define the class before including platform file, which may use this
+// as a base class, requiring it be complete. The definition is later
+// in this file, near the other definitions related to cmpxchg.
+struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
+  template<typename T>
+  T operator()(T exchange_value,
+               T volatile* dest,
+               T compare_value,
+               cmpxchg_memory_order order) const;
 };

 // platform specific in-line definitions - must come before shared definitions

@@ -143,61 +267,152 @@
   dec_ptr((volatile intptr_t*) dest);
 }

-#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-/*
- * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
- * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
- * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
- * implementation to be used instead.
- */
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
-                             jbyte compare_value, cmpxchg_memory_order order) {
-  STATIC_ASSERT(sizeof(jbyte) == 1);
-  volatile jint* dest_int =
-      reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
-  size_t offset = pointer_delta(dest, dest_int, 1);
-  jint cur = *dest_int;
-  jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
+template<typename T, typename D, typename U>
+inline D Atomic::cmpxchg(T exchange_value,
+                         D volatile* dest,
+                         U compare_value,
+                         cmpxchg_memory_order order) {
+  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
+}
+
+template<typename T, typename D>
+inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
+                                    cmpxchg_memory_order order) {
+  // Presently using a trivial implementation in terms of cmpxchg.
+  // Consider adding platform support, to permit the use of compiler
+  // intrinsics like gcc's __sync_bool_compare_and_swap.
+  D* expected_null = NULL;
+  return expected_null == cmpxchg(value, dest, expected_null, order);
+}
+
+// Handle cmpxchg for integral and enum types.
+//
+// All the involved types must be identical.
+template<typename T>
+struct Atomic::CmpxchgImpl<
+  T, T, T,
+  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T exchange_value, T volatile* dest, T compare_value,
+               cmpxchg_memory_order order) const {
+    // Forward to the platform handler for the size of T.
+    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
+                                        dest,
+                                        compare_value,
+                                        order);
+  }
+};
+
+// Handle cmpxchg for pointer types.
+//
+// The destination's type and the compare_value type must be the same,
+// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
+// the compare_value.
+//
+// The exchange_value must be implicitly convertible to the
+// destination's type; it must be type-correct to store the
+// exchange_value in the destination.
+template<typename T, typename D, typename U>
+struct Atomic::CmpxchgImpl<
+  T*, D*, U*,
+  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
+                    IsSame<typename RemoveCV<D>::type,
+                           typename RemoveCV<U>::type>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
+                cmpxchg_memory_order order) const {
+    // Allow derived to base conversion, and adding cv-qualifiers.
+    D* new_value = exchange_value;
+    // Don't care what the CV qualifiers for compare_value are,
+    // but we need to match D* when calling platform support.
+    D* old_value = const_cast<D*>(compare_value);
+    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
+  }
+};
+
+// Handle cmpxchg for types that have a translator.
+//
+// All the involved types must be identical.
+//
+// This translates the original call into a call on the decayed
+// arguments, and returns the recovered result of that translated
+// call.
+template<typename T>
+struct Atomic::CmpxchgImpl<
+  T, T, T,
+  typename EnableIf<IntegerTypes::Translate<T>::value>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  T operator()(T exchange_value, T volatile* dest, T compare_value,
+               cmpxchg_memory_order order) const {
+    typedef IntegerTypes::Translate<T> Translator;
+    typedef typename Translator::Decayed Decayed;
+    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
+    return Translator::recover(
+      cmpxchg(Translator::decay(exchange_value),
+              reinterpret_cast<Decayed volatile*>(dest),
+              Translator::decay(compare_value),
+              order));
+  }
+};
+
+template<typename Type, typename Fn, typename T>
+inline T Atomic::cmpxchg_using_helper(Fn fn,
+                                      T exchange_value,
+                                      T volatile* dest,
+                                      T compare_value) {
+  STATIC_ASSERT(sizeof(Type) == sizeof(T));
+  return IntegerTypes::cast<T>(
+    fn(IntegerTypes::cast<Type>(exchange_value),
+       reinterpret_cast<Type volatile*>(dest),
+       IntegerTypes::cast<Type>(compare_value)));
+}
+
+template<typename T>
+inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
+                                                 T volatile* dest,
+                                                 T compare_value,
+                                                 cmpxchg_memory_order order) const {
+  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
+  uint8_t canon_exchange_value = exchange_value;
+  uint8_t canon_compare_value = compare_value;
+  volatile uint32_t* aligned_dest
+    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
+  size_t offset = pointer_delta(dest, aligned_dest, 1);
+  uint32_t cur = *aligned_dest;
+  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);
   // current value may not be what we are looking for, so force it
   // to that value so the initial cmpxchg will fail if it is different
-  cur_as_bytes[offset] = compare_value;
+  cur_as_bytes[offset] = canon_compare_value;

   // always execute a real cmpxchg so that we get the required memory
   // barriers even on initial failure
   do {
     // value to swap in matches current value ...
-    jint new_value = cur;
+    uint32_t new_value = cur;
     // ... except for the one jbyte we want to update
-    reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;
+    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

-    jint res = cmpxchg(new_value, dest_int, cur, order);
-    if (res == cur) break; // success
+    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
+    if (res == cur) break;    // success

-    // at least one jbyte in the jint changed value, so update
-    // our view of the current jint
+    // at least one byte in the int changed value, so update
+    // our view of the current int
     cur = res;
-    // if our jbyte is still as cur we loop and try again
-  } while (cur_as_bytes[offset] == compare_value);
+    // if our byte is still as cur we loop and try again
+  } while (cur_as_bytes[offset] == canon_compare_value);

-  return cur_as_bytes[offset];
+  return IntegerTypes::cast<T>(cur_as_bytes[offset]);
 }
-#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-
 inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 }

-inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
-                                volatile unsigned int* dest, unsigned int compare_value,
-                                cmpxchg_memory_order order) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
-                                       (jint)compare_value, order);
-}
-
 inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
   // Most platforms do not support atomic add on a 2-byte value. However,
   // if the value occupies the most significant 16 bits of an aligned 32-bit
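
As a usage illustration of the templated API introduced above (a minimal sketch, not part of the patch; the Base/Node types and the _head and _cache variables are hypothetical), the new cmpxchg deduces the result type from the destination and accepts mixed pointer argument types, while replace_if_null gives a lock-free lazy-initialization idiom:

#include "runtime/atomic.hpp"

// Hypothetical types and shared slots, for illustration only.
class Base {};
class Node : public Base {};

static Node* volatile _head  = NULL;   // shared list head
static Base* volatile _cache = NULL;   // lazily initialized slot

void example(Node* fresh, Node* expected) {
  // Pointer cmpxchg: exchange_value only needs to be implicitly
  // convertible to the destination's type, and the result has the
  // destination's type (Node* here).
  Node* prior = Atomic::cmpxchg(fresh, &_head, expected);

  // Lock-free lazy initialization: installs 'fresh' only if the slot
  // is still NULL; returns true when this thread did the install.
  // Derived-to-base conversion (Node* -> Base*) is accepted as well.
  bool installed = Atomic::replace_if_null(fresh, &_cache);

  (void)prior; (void)installed;
}

The intent of the dispatch machinery above is that such call sites no longer cast through a canonical jint/intptr_t type; the CmpxchgImpl specializations perform the type checking and conversions instead.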