--- old/src/share/vm/runtime/atomic.hpp	2017-07-27 17:46:46.603146581 +0200
+++ new/src/share/vm/runtime/atomic.hpp	2017-07-27 17:46:46.451146586 +0200
@@ -26,7 +26,13 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
+#include "metaprogramming/integerTypes.hpp"
+#include "metaprogramming/isDerived.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "utilities/align.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
 enum cmpxchg_memory_order {
@@ -35,10 +41,94 @@
   memory_order_conservative = 8
 };
 
+class GeneralizedAtomic : AllStatic {
+  template<typename T> class Never: public FalseType {};
+
+  template <typename T>
+  inline static void specialized_store(T store_value, volatile T* dest) {
+    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
+    (void)const_cast<T&>(*dest = store_value);
+  }
+
+  template <typename T>
+  inline static T specialized_load(const volatile T* dest) {
+    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
+    return *dest;
+  }
+
+  template <typename T>
+  inline static T specialized_add(T add_value, volatile T* dest) {
+    STATIC_ASSERT(Never<T>::value);
+    return add_value;
+  }
+
+  template <typename T>
+  inline static void specialized_inc(volatile T* dest) {
+    add(1, dest);
+  }
+
+  template <typename T>
+  inline static void specialized_dec(volatile T* dest) {
+    add(-1, dest);
+  }
+
+  template <typename T>
+  inline static T specialized_xchg(T exchange_value, volatile T* dest) {
+    STATIC_ASSERT(Never<T>::value);
+    return exchange_value;
+  }
+
+  template <typename T>
+  inline static T specialized_cmpxchg(T exchange_value, volatile T* dest, T compare_value, cmpxchg_memory_order order) {
+    STATIC_ASSERT(Never<T>::value);
+    return exchange_value;
+  }
+
+public:
+  template <typename T, typename U>
+  inline static void store(T store_value, volatile U* dest);
+
+  template <typename T>
+  inline static T load(volatile T* src);
+
+  template <typename T, typename U>
+  inline static U add(T add_value, volatile U* dst);
+
+  template <typename T, typename U>
+  inline static U* add(T add_value, U* volatile* dst);
+
+  template <typename T>
+  inline static void inc(volatile T* dest);
+
+  template <typename T>
+  inline static void inc(T* volatile* dest);
+
+  template <typename T>
+  inline static void dec(volatile T* dest);
+
+  template <typename T>
+  inline static void dec(T* volatile* dest);
+
+  template <typename T, typename U>
+  inline static U xchg(T exchange_value, volatile U* dest);
+
+  template <typename T, typename U, typename V>
+  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order);
+};
+
+
+// platform specific in-line definitions - must come before shared definitions
+
+class PlatformAtomic;
+
+#include OS_CPU_HEADER(atomic)
+
+typedef Conditional<IsDerived<PlatformAtomic, AllStatic>::value, PlatformAtomic, GeneralizedAtomic>::type AtomicImpl;
+
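The AtomicImpl typedef above is the dispatch point of the patch: a port appears to opt in by defining PlatformAtomic (deriving from AllStatic) in its OS_CPU header, and otherwise the shared GeneralizedAtomic fallback is selected at compile time. A minimal sketch of that selection idiom, using stand-in trait names rather than the real metaprogramming headers (Select here plays the role of Conditional; the derivation test is assumed to be what IsDerived performs):

    // Sketch only: stand-ins for the traits in conditional.hpp / isDerived.hpp.
    template <bool Cond, typename IfTrue, typename IfFalse>
    struct Select { typedef IfTrue type; };

    template <typename IfTrue, typename IfFalse>
    struct Select<false, IfTrue, IfFalse> { typedef IfFalse type; };

    struct Fallback { /* shared, compiler-generic operations */ };
    struct Platform { /* hand-written, platform-specific operations */ };

    // Compile-time choice: no virtual dispatch, no runtime cost.
    typedef Select<true /* platform provides an implementation? */,
                   Platform, Fallback>::type Impl;
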
 class Atomic : AllStatic {
  public:
-  // Atomic operations on jlong types are not available on all 32-bit
-  // platforms. If atomic ops on jlongs are defined here they must only
+  // Atomic operations on 64-bit types are not available on all 32-bit
+  // platforms. If atomic ops on 64-bit types are defined here they must only
   // be used from code that verifies they are available at runtime and
   // can provide an alternative action if not - see supports_cx8() for
   // a means to test availability.
@@ -56,108 +146,250 @@
   // we can prove that a weaker form is sufficiently safe.
 
   // Atomically store to a location
-  inline static void store    (jbyte    store_value, jbyte*    dest);
-  inline static void store    (jshort   store_value, jshort*   dest);
-  inline static void store    (jint     store_value, jint*     dest);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static void store    (jlong    store_value, jlong*    dest);
-  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
-  inline static void store_ptr(void*    store_value, void*     dest);
-
-  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
-  inline static void store    (jshort   store_value, volatile jshort*   dest);
-  inline static void store    (jint     store_value, volatile jint*     dest);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static void store    (jlong    store_value, volatile jlong*    dest);
-  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
-  inline static void store_ptr(void*    store_value, volatile void*     dest);
-
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong load(const volatile jlong* src);
+  // See comment above about using 64-bit atomics on 32-bit platforms
+  template <typename T, typename U>
+  inline static void store(T store_value, volatile U* dest);
+
+  // The store_ptr() member functions are deprecated. Use store() instead.
+  static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
+    store(store_value, dest);
+  }
+
+  static void store_ptr(void* store_value, volatile void* dest) {
+    store((intptr_t)store_value, (volatile intptr_t*)dest);
+  }
+
+  // Atomically load from a location
+  // See comment above about using 64-bit atomics on 32-bit platforms
+  template <typename T>
+  inline static T load(volatile T* src);
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // add-value-to-dest
-  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
-  inline static jint     add    (jint     add_value, volatile jint*     dest);
-  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
-  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
-  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
+  // add(I1 v, I* d)
+  // add(I1 v, P* d)
+  // where I, I1 are integral types, P is a pointer type.
+  // Functional behavior is modelled on *dest += add_value.
+  template <typename T, typename U>
+  inline static U add(T add_value, volatile U* dst);
+
+  // The add_ptr() member functions are deprecated. Use add() instead.
+  static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+    return add(add_value, dest);
+  }
+
+  static void* add_ptr(intptr_t add_value, volatile void* dest) {
+    return (void*)add(add_value, (volatile intptr_t*)dest);
+  }
 
   // Atomically increment location. inc*() provide:
   // increment-dest
-  inline static void inc    (volatile jint*     dest);
-  inline static void inc    (volatile jshort*   dest);
-  inline static void inc    (volatile size_t*   dest);
-  inline static void inc_ptr(volatile intptr_t* dest);
-  inline static void inc_ptr(volatile void*     dest);
+  // Functional behavior is modelled on *dest++
+  template <typename T>
+  inline static void inc(volatile T* dest);
+
+  // The inc_ptr member functions are deprecated. Use inc() instead.
+  static void inc_ptr(volatile intptr_t* dest) {
+    inc(dest);
+  }
+
+  static void inc_ptr(volatile void* dest) {
+    inc((volatile intptr_t*)dest);
+  }
 
   // Atomically decrement a location. dec*() provide:
   // decrement-dest
-  inline static void dec    (volatile jint*     dest);
-  inline static void dec    (volatile jshort*   dest);
-  inline static void dec    (volatile size_t*   dest);
-  inline static void dec_ptr(volatile intptr_t* dest);
-  inline static void dec_ptr(volatile void*     dest);
+  // Functional behavior is modelled on *dest--
+  template <typename T>
+  inline static void dec(volatile T* dest);
+
+  // The dec_ptr member functions are deprecated. Use dec() instead.
+  static void dec_ptr(volatile intptr_t* dest) {
+    dec(dest);
+  }
+
+  static void dec_ptr(volatile void* dest) {
+    dec((volatile intptr_t*)dest);
+  }
 
   // Performs atomic exchange of *dest with exchange_value. Returns old
   // prior value of *dest. xchg*() provide:
   // exchange-value-with-dest
-  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
-  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
-  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
-  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);
+  template <typename T, typename U>
+  inline static U xchg(T exchange_value, volatile U* dest);
+
+  // The xchg_ptr() member functions are deprecated. Use xchg() instead.
+  static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+    return xchg(exchange_value, dest);
+  }
+
+  static void* xchg_ptr(void* exchange_value, volatile void* dest) {
+    return (void*)xchg((intptr_t)exchange_value, (volatile intptr_t*)dest);
+  }
 
   // Performs atomic compare of *dest and compare_value, and exchanges
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // compare-and-exchange
-  inline static jbyte        cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static jint         cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong        cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static unsigned int cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static intptr_t     cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static void*        cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
+  // See comment above about using 64-bit atomics on 32-bit platforms
+  template <typename T, typename U, typename V>
+  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order = memory_order_conservative);
+
+  // The cmpxchg_ptr member functions are deprecated. Use cmpxchg() instead.
+  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest,
+                                     intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative) {
+    return cmpxchg(exchange_value, dest, compare_value, order);
+  }
+
+  inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest,
+                                  void* compare_value, cmpxchg_memory_order order = memory_order_conservative) {
+    return (void*)cmpxchg((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+  }
 };
 
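With the class as above, one templated entry point per operation replaces each per-type overload set, and argument types are deduced at the call site. A usage sketch against the new API (variable names are illustrative; assumes this header is included):

    volatile int32_t _refcount  = 0;
    volatile size_t  _allocated = 0;

    void example() {
      Atomic::inc(&_refcount);                 // was: inc(volatile jint*)
      Atomic::add(size_t(4096), &_allocated);  // was: the size_t overload
      // compare-and-exchange; memory_order_conservative is the default
      int32_t prev = Atomic::cmpxchg(int32_t(1), &_refcount, int32_t(0));
      (void)prev;
    }
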
-// platform specific in-line definitions - must come before shared definitions
+// internal implementation
 
-#include OS_CPU_HEADER(atomic)
+template <typename T, typename U>
+inline void GeneralizedAtomic::store(T store_value, volatile U* dest) {
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  U store_value_cast = store_value;
+  specialized_store(IntegerTypes::cast_to_signed(store_value_cast), reinterpret_cast<volatile Raw*>(dest));
+}
 
-// shared in-line definitions
+template <typename T>
+inline T GeneralizedAtomic::load(volatile T* src) {
+  typedef typename IntegerTypes::Signed<T>::type Raw;
+  return IntegerTypes::cast<T>(specialized_load(reinterpret_cast<const volatile Raw*>(src)));
+}
 
-// size_t casts...
-#if (SIZE_MAX != UINTPTR_MAX)
-#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
-#endif
+template <typename T, typename U>
+inline U GeneralizedAtomic::add(T add_value, volatile U* dst) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  STATIC_ASSERT(IsIntegral<U>::value);
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  // Allow -Wconversion or the like to complain about unsafe conversions.
+  U value = add_value;
+  Raw raw_value = IntegerTypes::cast_to_signed(value);
+  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
+  return IntegerTypes::cast<U>(result);
+}
+
+template <typename T, typename U>
+inline U* GeneralizedAtomic::add(T add_value, U* volatile* dst) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  typedef typename IntegerTypes::Signed<U*>::type Raw;
+  ptrdiff_t value = add_value;
+  Raw raw_value = IntegerTypes::cast_to_signed(value * sizeof(U));
+  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
+  return IntegerTypes::cast<U*>(result);
+}
+
+template <typename T>
+inline void GeneralizedAtomic::inc(volatile T* src) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  typedef typename IntegerTypes::Signed<T>::type Raw;
+  specialized_inc(reinterpret_cast<volatile Raw*>(src));
+}
+
+template <typename T>
+inline void GeneralizedAtomic::inc(T* volatile* src) {
+  if (sizeof(T) != 1) {
+    add(1, src);
+  } else {
+    typedef typename IntegerTypes::Signed<T*>::type Raw;
+    specialized_inc(reinterpret_cast<volatile Raw*>(src));
+  }
+}
+
+template <typename T>
+inline void GeneralizedAtomic::dec(volatile T* src) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  typedef typename IntegerTypes::Signed<T>::type Raw;
+  specialized_dec(reinterpret_cast<volatile Raw*>(src));
+}
+
+template <typename T>
+inline void GeneralizedAtomic::dec(T* volatile* src) {
+  if (sizeof(T) != 1) {
+    add(-1, src);
+  } else {
+    typedef typename IntegerTypes::Signed<T*>::type Raw;
+    specialized_dec(reinterpret_cast<volatile Raw*>(src));
+  }
+}
+
+template <typename T, typename U>
+inline U GeneralizedAtomic::xchg(T exchange_value, volatile U* dest) {
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  U exchange_value_cast = exchange_value;
+  Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value_cast),
+                                reinterpret_cast<volatile Raw*>(dest));
+  return IntegerTypes::cast<U>(result);
+}
 
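Note that the pointer overload of add() above multiplies the operand by sizeof(U), so it behaves like ordinary pointer arithmetic rather than a raw byte offset. A small sketch of what that buys a caller (the Node type and variable are hypothetical):

    struct Node { char payload[16]; };
    Node* volatile _cursor;

    Node* advance_two() {
      // Atomically bumps _cursor by 2 * sizeof(Node) bytes and returns the
      // updated pointer, matching the value * sizeof(U) scaling above.
      return Atomic::add(2, &_cursor);
    }
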
+template <typename T, typename U, typename V>
+inline U GeneralizedAtomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  U exchange_value_cast = exchange_value;
+  U compare_value_cast = compare_value;
+  Raw result = specialized_cmpxchg(IntegerTypes::cast_to_signed(exchange_value_cast),
+                                   reinterpret_cast<volatile Raw*>(dest),
+                                   IntegerTypes::cast_to_signed(compare_value_cast), order);
+  return IntegerTypes::cast<U>(result);
+}
+
+template <typename T, typename U>
+inline void Atomic::store(T store_value, volatile U* dest) {
+  AtomicImpl::store(store_value, dest);
+}
 
-inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
-  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
+template <typename T>
+inline T Atomic::load(volatile T* src) {
+  return AtomicImpl::load(src);
 }
 
-inline void Atomic::inc(volatile size_t* dest) {
-  inc_ptr((volatile intptr_t*) dest);
+template <typename T, typename U>
+inline U Atomic::add(T add_value, volatile U* dst) {
+  return AtomicImpl::add(add_value, dst);
 }
 
-inline void Atomic::dec(volatile size_t* dest) {
-  dec_ptr((volatile intptr_t*) dest);
+template <typename T>
+inline void Atomic::inc(volatile T* src) {
+  AtomicImpl::inc(src);
 }
 
+template <typename T>
+inline void Atomic::dec(volatile T* src) {
+  AtomicImpl::dec(src);
+}
+
+template <typename T, typename U>
+inline U Atomic::xchg(T exchange_value, volatile U* dest) {
+  return AtomicImpl::xchg(exchange_value, dest);
+}
+
+template <typename T, typename U, typename V>
+inline U Atomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
+  return AtomicImpl::cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+// shared in-line definitions
+
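cmpxchg() returns the prior value of *dest, which is exactly what a retry loop needs. A typical pattern over the templated API (an illustrative helper, not part of the patch):

    // Atomically raise *dest to at least 'value' (an atomic max).
    void atomic_max(volatile int32_t* dest, int32_t value) {
      int32_t old = Atomic::load(dest);
      while (old < value) {
        int32_t prev = Atomic::cmpxchg(value, dest, old);
        if (prev == old) break;  // CAS succeeded: *dest went old -> value
        old = prev;              // lost a race; retry against the fresh value
      }
    }
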
 #ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 /*
- * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
- * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
+ * This is the default implementation of byte-sized cmpxchg. It emulates 8-bit-sized cmpxchg
+ * in terms of 32-bit-sized cmpxchg. Platforms may override this by defining their own inline definition
  * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
  * implementation to be used instead.
  */
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
-                             jbyte compare_value, cmpxchg_memory_order order) {
-  STATIC_ASSERT(sizeof(jbyte) == 1);
-  volatile jint* dest_int =
-      reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
+template <>
+inline int8_t GeneralizedAtomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest,
+                                                             int8_t compare_value, cmpxchg_memory_order order) {
+  volatile int32_t* dest_int =
+      reinterpret_cast<volatile int32_t*>(align_down(dest, sizeof(int32_t)));
   size_t offset = pointer_delta(dest, dest_int, 1);
-  jint cur = *dest_int;
-  jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
+  int32_t cur = *dest_int;
+  int8_t* cur_as_bytes = reinterpret_cast<int8_t*>(&cur);
 
   // current value may not be what we are looking for, so force it
   // to that value so the initial cmpxchg will fail if it is different
@@ -167,17 +399,17 @@
   // barriers even on initial failure
   do {
     // value to swap in matches current value ...
-    jint new_value = cur;
-    // ... except for the one jbyte we want to update
-    reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;
+    int32_t new_value = cur;
+    // ... except for the one byte we want to update
+    reinterpret_cast<int8_t*>(&new_value)[offset] = exchange_value;
 
-    jint res = cmpxchg(new_value, dest_int, cur, order);
+    int32_t res = cmpxchg(new_value, dest_int, cur, order);
     if (res == cur) break;      // success
 
-    // at least one jbyte in the jint changed value, so update
-    // our view of the current jint
+    // at least one byte in the int changed value, so update
+    // our view of the current int
     cur = res;
-    // if our jbyte is still as cur we loop and try again
+    // if our byte is still as cur we loop and try again
   } while (cur_as_bytes[offset] == compare_value);
 
   return cur_as_bytes[offset];
@@ -185,20 +417,8 @@
 }
 
 #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 
-inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
-                                volatile unsigned int* dest, unsigned int compare_value,
-                                cmpxchg_memory_order order) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
-                                       (jint)compare_value, order);
-}
-
-inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
+template <>
+inline int16_t GeneralizedAtomic::specialized_add<int16_t>(int16_t add_value, volatile int16_t* dest) {
   // Most platforms do not support atomic add on a 2-byte value. However,
   // if the value occupies the most significant 16 bits of an aligned 32-bit
   // word, then we can do this with an atomic add of (add_value << 16)
@@ -210,20 +430,22 @@
   // to the 32-bit word.
   //
   // The least significant parts of this 32-bit word will never be affected, even
   // in case of overflow/underflow.
   //
   // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 #ifdef VM_LITTLE_ENDIAN
   assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest-1));
 #else
   assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest));
 #endif
-  return (jshort)(new_value >> 16); // preserves sign
+  return (int16_t)(new_value >> 16); // preserves sign
 }
 
-inline void Atomic::inc(volatile jshort* dest) {
-  (void)add(1, dest);
+template <>
+inline void GeneralizedAtomic::specialized_inc<int16_t>(volatile int16_t* dest) {
+  (void)add(int16_t(1), dest);
 }
 
-inline void Atomic::dec(volatile jshort* dest) {
-  (void)add(-1, dest);
+template <>
+inline void GeneralizedAtomic::specialized_dec<int16_t>(volatile int16_t* dest) {
+  (void)add(int16_t(-1), dest);
 }
 
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
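The 16-bit specializations rely on the short occupying the high half of an aligned 32-bit word (arranged via ATOMIC_SHORT_PAIR): adding add_value << 16 can only carry out of bit 31, where the carry is discarded, so the neighboring low half is never disturbed, and shifting the result back down recovers the updated short. A standalone arithmetic check of that reasoning (a sketch in plain C++, using an unsigned shift plus a cast in place of the arithmetic right shift above):

    #include <stdint.h>
    #include <assert.h>

    int main() {
      // High half holds int16_t(-2); low half is unrelated neighboring data.
      uint32_t word = 0xFFFE1234u;
      // The atomic add of (add_value << 16), here with add_value == 5:
      // 0xFFFE1234 + 0x00050000 wraps past bit 31 to 0x00031234.
      word += uint32_t(5) << 16;
      assert(int16_t(word >> 16) == 3);     // updated short: -2 + 5 == 3
      assert((word & 0xFFFFu) == 0x1234u);  // neighboring low half untouched
      return 0;
    }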