
src/share/vm/runtime/atomic.hpp

rev 13429 : imported patch cmpxchg_template
rev 13433 : imported patch comments
rev 13435 : imported patch better_cmpxchg_translate
rev 13452 : [mq]: coleen_review1
rev 13454 : [mq]: roman_review1
rev 13455 : [mq]: replace_if_null
rev 13458 : imported patch cmpxchg_using_helper

*** 24,33 ****
--- 24,38 ----
  #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
  #define SHARE_VM_RUNTIME_ATOMIC_HPP
  
  #include "memory/allocation.hpp"
+ #include "metaprogramming/enableIf.hpp"
+ #include "metaprogramming/integerTypes.hpp"
+ #include "metaprogramming/isIntegral.hpp"
+ #include "metaprogramming/isSame.hpp"
+ #include "metaprogramming/removeCV.hpp"
  #include "utilities/align.hpp"
  #include "utilities/macros.hpp"
  
  enum cmpxchg_memory_order {
    memory_order_relaxed,
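For orientation: the memory_order argument threaded through the new declarations below comes from this enum. Callers get memory_order_conservative by default and may pass memory_order_relaxed where no ordering guarantees are needed. A minimal sketch (hypothetical caller code, not part of this patch; _claimed and try_claim are illustrative names):

#include "runtime/atomic.hpp"

static volatile jint _claimed = 0;  // hypothetical claim flag

bool try_claim() {
  // Ordering is irrelevant for claiming this flag, so override the
  // default memory_order_conservative argument with relaxed.
  jint prior = Atomic::cmpxchg((jint)1, &_claimed, (jint)0,
                               memory_order_relaxed);
  return prior == 0;  // true iff this caller claimed the flag
}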
*** 109,125 ****
  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
!   inline static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order = memory_order_conservative);
!   inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order = memory_order_conservative);
!   // See comment above about using jlong atomics on 32-bit platforms
!   inline static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order = memory_order_conservative);
!   inline static unsigned int cmpxchg(unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
!   inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative);
!   inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order = memory_order_conservative);
  };
  
  // platform specific in-line definitions - must come before shared definitions
  #include OS_CPU_HEADER(atomic)
--- 114,249 ----
  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
!
!   template<typename T, typename D, typename U>
!   inline static D cmpxchg(T exchange_value,
!                           D volatile* dest,
!                           U compare_value,
!                           cmpxchg_memory_order order = memory_order_conservative);
!
!   // Performs atomic compare of *dest and NULL, and replaces *dest
!   // with exchange_value if the comparison succeeded. Returns true if
!   // the comparison succeeded and the exchange occurred. This is
!   // often used as part of lazy initialization, as a lock-free
!   // alternative to the Double-Checked Locking Pattern.
!   template<typename T, typename D>
!   inline static bool replace_if_null(T* value, D* volatile* dest,
!                                      cmpxchg_memory_order order = memory_order_conservative);
!
!   inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
!                                      volatile intptr_t* dest,
!                                      intptr_t compare_value,
!                                      cmpxchg_memory_order order = memory_order_conservative) {
!     return cmpxchg(exchange_value, dest, compare_value, order);
!   }
!
!   inline static void* cmpxchg_ptr(void* exchange_value,
!                                   volatile void* dest,
!                                   void* compare_value,
!                                   cmpxchg_memory_order order = memory_order_conservative) {
!     return cmpxchg(exchange_value,
!                    reinterpret_cast<void* volatile*>(dest),
!                    compare_value,
!                    order);
!   }
!
!  private:
!   // Test whether From is implicitly convertible to To.
!   // From and To must be pointer types.
!   // Note: Provides the limited subset of C++11 std::is_convertible
!   // that is needed here.
!   template<typename From, typename To> struct IsPointerConvertible;
!
!   // Dispatch handler for cmpxchg.  Provides type-based validity
!   // checking and limited conversions around calls to the
!   // platform-specific implementation layer provided by
!   // PlatformCmpxchg.
!   template<typename T, typename D, typename U, typename Enable = void>
!   struct CmpxchgImpl;
!
!   // Platform-specific implementation of cmpxchg.  Support for sizes
!   // of 1, 4, and 8 are required.  The class is a function object that
!   // must be default constructable, with these requirements:
!   //
!   // - dest is of type T*.
!   // - exchange_value and compare_value are of type T.
!   // - order is of type cmpxchg_memory_order.
!   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
!   //
!   // Then
!   //   platform_cmpxchg(exchange_value, dest, compare_value, order)
!   // must be a valid expression, returning a result convertible to T.
!   //
!   // A default definition is provided, which declares a function template
!   //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
!   //
!   // For each required size, a platform must either provide an
!   // appropriate definition of that function, or must entirely
!   // specialize the class template for that size.
!   template<size_t byte_size> struct PlatformCmpxchg;
!
!   // Support for platforms that implement some variants of cmpxchg
!   // using a (typically out of line) non-template helper function.
!   // The generic arguments passed to PlatformCmpxchg need to be
!   // translated to the appropriate type for the helper function, the
!   // helper invoked on the translated arguments, and the result
!   // translated back.  Type is the parameter / return type of the
!   // helper function.
!   template<typename Type, typename Fn, typename T>
!   static T cmpxchg_using_helper(Fn fn,
!                                 T exchange_value,
!                                 T volatile* dest,
!                                 T compare_value);
!
!   // Support platforms that do not provide Read-Modify-Write
!   // byte-level atomic access.  To use, derive PlatformCmpxchg<1> from
!   // this class.
!  public: // Temporary, can't be private: C++03 11.4/2.  Fixed by C++11.
!   struct CmpxchgByteUsingInt;
!  private:
! };
!
! template<typename From, typename To>
! struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
!   // Determine whether From* is implicitly convertible to To*, using
!   // the "sizeof trick".
!   typedef char yes;
!   typedef char (&no)[2];
!
!   static yes test(To*);
!   static no test(...);
!   static From* test_value;
!
!   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
! };
!
! // Define the class before including platform file, which may specialize
! // the operator definition.  No generic definition of specializations
! // of the operator template are provided, nor are there any generic
! // specializations of the class.  The platform file is responsible for
! // providing those.
! template<size_t byte_size>
! struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
!   template<typename T>
!   T operator()(T exchange_value,
!                T volatile* dest,
!                T compare_value,
!                cmpxchg_memory_order order) const;
! };
!
! // Define the class before including platform file, which may use this
! // as a base class, requiring it be complete.  The definition is later
! // in this file, near the other definitions related to cmpxchg.
! struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
!   template<typename T>
!   T operator()(T exchange_value,
!                T volatile* dest,
!                T compare_value,
!                cmpxchg_memory_order order) const;
  };
  
  // platform specific in-line definitions - must come before shared definitions
  #include OS_CPU_HEADER(atomic)
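Taken together, the declarations above replace the fixed jbyte/jint/jlong/unsigned overloads with a single template whose argument types are deduced at the call site, with CmpxchgImpl dispatching on those types. A minimal usage sketch (hypothetical caller code, not part of this patch; Node, _count, _head, and publish are illustrative names):

#include "runtime/atomic.hpp"

struct Node { Node* _next; };

static volatile jint  _count = 0;    // illustrative integral field
static Node* volatile _head  = NULL; // illustrative pointer field

void publish(Node* n) {
  // Integral cmpxchg: exchange_value, *dest, and compare_value must
  // all have the same integral type (jint here); the prior value of
  // *dest is returned.
  jint prior = Atomic::cmpxchg((jint)1, &_count, (jint)0);

  // Pointer cmpxchg: the exchange value need only be implicitly
  // convertible to the destination's type, and compare_value may
  // differ in cv-qualifiers.  NULL is cast so a pointer type is
  // deduced for the compare_value parameter.
  Node* old_head = Atomic::cmpxchg(n, &_head, (Node*)NULL);

  // replace_if_null: lock-free lazy publication; true iff *dest was
  // NULL and n was installed.
  bool installed = Atomic::replace_if_null(n, &_head);

  if (prior == 0 && old_head == NULL && installed) {
    // n was the first value published on every path
  }
}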
*** 141,205 ****
  inline void Atomic::dec(volatile size_t* dest) {
    dec_ptr((volatile intptr_t*) dest);
  }
  
! #ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! /*
!  * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
!  * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
!  * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
!  * implementation to be used instead.
!  */
! inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
!                              jbyte compare_value, cmpxchg_memory_order order) {
!   STATIC_ASSERT(sizeof(jbyte) == 1);
!   volatile jint* dest_int =
!       reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
!   size_t offset = pointer_delta(dest, dest_int, 1);
!   jint cur = *dest_int;
!   jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
  
    // current value may not be what we are looking for, so force it
    // to that value so the initial cmpxchg will fail if it is different
!   cur_as_bytes[offset] = compare_value;
  
    // always execute a real cmpxchg so that we get the required memory
    // barriers even on initial failure
    do {
      // value to swap in matches current value ...
!     jint new_value = cur;
      // ... except for the one jbyte we want to update
!     reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;
  
!     jint res = cmpxchg(new_value, dest_int, cur, order);
      if (res == cur) break;      // success
  
!     // at least one jbyte in the jint changed value, so update
!     // our view of the current jint
      cur = res;
!     // if our jbyte is still as cur we loop and try again
!   } while (cur_as_bytes[offset] == compare_value);
  
!   return cur_as_bytes[offset];
  }
  
- #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
- 
  inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
    assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
    return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
  }
  
- inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
-                                 volatile unsigned int* dest, unsigned int compare_value,
-                                 cmpxchg_memory_order order) {
-   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
-                                        (jint)compare_value, order);
- }
- 
  inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
    // Most platforms do not support atomic add on a 2-byte value. However,
    // if the value occupies the most significant 16 bits of an aligned 32-bit
    // word, then we can do this with an atomic add of (add_value << 16)
    // to the 32-bit word.
--- 265,420 ----
  inline void Atomic::dec(volatile size_t* dest) {
    dec_ptr((volatile intptr_t*) dest);
  }
  
! template<typename T, typename D, typename U>
! inline D Atomic::cmpxchg(T exchange_value,
!                          D volatile* dest,
!                          U compare_value,
!                          cmpxchg_memory_order order) {
!   return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
! }
!
! template<typename T, typename D>
! inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
!                                     cmpxchg_memory_order order) {
!   // Presently using a trivial implementation in terms of cmpxchg.
!   // Consider adding platform support, to permit the use of compiler
!   // intrinsics like gcc's __sync_bool_compare_and_swap.
!   D* expected_null = NULL;
!   return expected_null == cmpxchg(value, dest, expected_null, order);
! }
!
! // Handle cmpxchg for integral and enum types.
! //
! // All the involved types must be identical.
! template<typename T>
! struct Atomic::CmpxchgImpl<
!   T, T, T,
!   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
!   VALUE_OBJ_CLASS_SPEC
! {
!   T operator()(T exchange_value, T volatile* dest, T compare_value,
!                cmpxchg_memory_order order) const {
!     // Forward to the platform handler for the size of T.
!     return PlatformCmpxchg<sizeof(T)>()(exchange_value,
!                                         dest,
!                                         compare_value,
!                                         order);
!   }
! };
!
! // Handle cmpxchg for pointer types.
! //
! // The destination's type and the compare_value type must be the same,
! // ignoring cv-qualifiers; we don't care about the cv-qualifiers of
! // the compare_value.
! //
! // The exchange_value must be implicitly convertible to the
! // destination's type; it must be type-correct to store the
! // exchange_value in the destination.
! template<typename T, typename D, typename U>
! struct Atomic::CmpxchgImpl<
!   T*, D*, U*,
!   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
!                     IsSame<typename RemoveCV<D>::type,
!                            typename RemoveCV<U>::type>::value>::type>
!   VALUE_OBJ_CLASS_SPEC
! {
!   D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
!                 cmpxchg_memory_order order) const {
!     // Allow derived to base conversion, and adding cv-qualifiers.
!     D* new_value = exchange_value;
!     // Don't care what the CV qualifiers for compare_value are,
!     // but we need to match D* when calling platform support.
!     D* old_value = const_cast<D*>(compare_value);
!     return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
!   }
! };
!
! // Handle cmpxchg for types that have a translator.
! //
! // All the involved types must be identical.
! //
! // This translates the original call into a call on the decayed
! // arguments, and returns the recovered result of that translated
! // call.
! template<typename T>
! struct Atomic::CmpxchgImpl<
!   T, T, T,
!   typename EnableIf<IntegerTypes::Translate<T>::value>::type>
!   VALUE_OBJ_CLASS_SPEC
! {
!   T operator()(T exchange_value, T volatile* dest, T compare_value,
!                cmpxchg_memory_order order) const {
!     typedef IntegerTypes::Translate<T> Translator;
!     typedef typename Translator::Decayed Decayed;
!     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
!     return Translator::recover(
!       cmpxchg(Translator::decay(exchange_value),
!               reinterpret_cast<Decayed volatile*>(dest),
!               Translator::decay(compare_value),
!               order));
!   }
! };
!
! template<typename Type, typename Fn, typename T>
! inline T Atomic::cmpxchg_using_helper(Fn fn,
!                                       T exchange_value,
!                                       T volatile* dest,
!                                       T compare_value) {
!   STATIC_ASSERT(sizeof(Type) == sizeof(T));
!   return IntegerTypes::cast<T>(
!     fn(IntegerTypes::cast<Type>(exchange_value),
!        reinterpret_cast<Type volatile*>(dest),
!        IntegerTypes::cast<Type>(compare_value)));
! }
!
! template<typename T>
! inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
!                                                  T volatile* dest,
!                                                  T compare_value,
!                                                  cmpxchg_memory_order order) const {
!   STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
!   uint8_t canon_exchange_value = exchange_value;
!   uint8_t canon_compare_value = compare_value;
!   volatile uint32_t* aligned_dest
!     = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
!   size_t offset = pointer_delta(dest, aligned_dest, 1);
!   uint32_t cur = *aligned_dest;
!   uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);
  
    // current value may not be what we are looking for, so force it
    // to that value so the initial cmpxchg will fail if it is different
!   cur_as_bytes[offset] = canon_compare_value;
  
    // always execute a real cmpxchg so that we get the required memory
    // barriers even on initial failure
    do {
      // value to swap in matches current value ...
!     uint32_t new_value = cur;
      // ... except for the one jbyte we want to update
!     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
  
!     uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
      if (res == cur) break;      // success
  
!     // at least one byte in the int changed value, so update
!     // our view of the current int
      cur = res;
!     // if our byte is still as cur we loop and try again
!   } while (cur_as_bytes[offset] == canon_compare_value);
  
!   return IntegerTypes::cast<T>(cur_as_bytes[offset]);
  }
  
  inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
    assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
    return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
  }
  
  inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
    // Most platforms do not support atomic add on a 2-byte value. However,
    // if the value occupies the most significant 16 bits of an aligned 32-bit
    // word, then we can do this with an atomic add of (add_value << 16)
    // to the 32-bit word.
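The pieces defined above are stitched together by each platform's atomic header, pulled in through OS_CPU_HEADER(atomic). A sketch of how a hypothetical port might provide the required PlatformCmpxchg sizes (the out-of-line helper os_cmpxchg_func and its signature are assumptions for illustration, not symbols introduced by this patch):

// Assumed out-of-line helper, e.g. implemented in assembly; the name
// and its existence are illustrative only.
extern "C" int32_t os_cmpxchg_func(int32_t exchange_value,
                                   int32_t volatile* dest,
                                   int32_t compare_value);

// This hypothetical hardware has no byte-sized compare-and-swap, so
// PlatformCmpxchg<1> derives from the shared int-based emulation, as
// the CmpxchgByteUsingInt comment above prescribes.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

// The 4-byte case forwards through cmpxchg_using_helper, which casts
// the generically-typed arguments to int32_t for the helper and casts
// the helper's result back to T.  A conservative helper provides full
// fencing, so the order argument is ignored here.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return cmpxchg_using_helper<int32_t>(os_cmpxchg_func,
                                       exchange_value,
                                       dest,
                                       compare_value);
}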