--- old/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp 2017-08-16 01:47:20.749015895 -0400 +++ new/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp 2017-08-16 01:47:20.637010139 -0400 @@ -312,6 +312,8 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(1 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). @@ -380,11 +382,13 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(4 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). - unsigned int old_value; + T old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); @@ -419,7 +423,7 @@ cmpxchg_post_membar(order); - return IntegerTypes::cast(old_value); + return old_value; } template<> @@ -428,11 +432,13 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(8 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). - long old_value; + T old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); @@ -467,7 +473,7 @@ cmpxchg_post_membar(order); - return IntegerTypes::cast(old_value); + return old_value; } #undef strasm_sync --- old/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp 2017-08-16 01:47:21.281043168 -0400 +++ new/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp 2017-08-16 01:47:21.169037429 -0400 @@ -85,6 +85,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order /* order */) const { + STATIC_ASSERT(1 == sizeof(T)); __asm__ volatile ( "lock cmpxchgb %1,(%3)" : "=a" (exchange_value) : "q" (exchange_value), "a" (compare_value), "r" (dest) @@ -98,6 +99,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order /* order */) const { + STATIC_ASSERT(4 == sizeof(T)); __asm__ volatile ( "lock cmpxchgl %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) @@ -150,6 +152,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order /* order */) const { + STATIC_ASSERT(8 == sizeof(T)); __asm__ __volatile__ ( "lock cmpxchgq %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) @@ -194,7 +197,8 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { - return cmpxchg_using_stub(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); + STATIC_ASSERT(8 == sizeof(T)); + return cmpxchg_using_helper(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); } inline jlong Atomic::load(const volatile jlong* src) { --- old/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp 2017-08-16 01:47:21.805070056 -0400 +++ new/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp 2017-08-16 01:47:21.693064308 -0400 @@ -277,11 +277,12 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) { + STATIC_ASSERT(4 == sizeof(T)); #ifdef ARM - return cmpxchg_using_stub(arm_compare_and_swap, exchange_value, dest, compare_value); + return cmpxchg_using_helper(arm_compare_and_swap, exchange_value, dest, compare_value); #else #ifdef M68K - return cmpxchg_using_stub(m68k_compare_and_swap, exchange_value, dest, compare_value); + return cmpxchg_using_helper(m68k_compare_and_swap, exchange_value, dest, compare_value); #else return __sync_val_compare_and_swap(dest, compare_value, exchange_value); #endif // M68K @@ -294,6
+295,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) { + STATIC_ASSERT(8 == sizeof(T)); return __sync_val_compare_and_swap(dest, compare_value, exchange_value); } --- old/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp 2017-08-16 01:47:22.333097139 -0400 +++ new/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp 2017-08-16 01:47:22.221091386 -0400 @@ -91,6 +91,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order) const { + STATIC_ASSERT(byte_size == sizeof(T)); if (order == memory_order_relaxed) { T value = compare_value; __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false, --- old/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-08-16 01:47:22.853123804 -0400 +++ new/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-08-16 01:47:22.745118270 -0400 @@ -200,6 +200,7 @@ // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering +// No direct support for cmpxchg of bytes; emulate using int. template<> struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; @@ -248,7 +249,7 @@ : "memory"); return rv; #else - return cmpxchg_using_stub(reorder_cmpxchg_func, exchange_value, dest, compare_value); + return cmpxchg_using_helper(reorder_cmpxchg_func, exchange_value, dest, compare_value); #endif } @@ -278,7 +279,7 @@ : "memory"); return rv; #else - return cmpxchg_using_stub(reorder_cmpxchg_long_func, exchange_value, dest, compare_value); + return cmpxchg_using_helper(reorder_cmpxchg_long_func, exchange_value, dest, compare_value); #endif } --- old/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp 2017-08-16 01:47:23.381150889 -0400 +++ new/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp 2017-08-16 01:47:23.269145147 -0400 @@ -312,6 +312,8 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(1 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). @@ -380,11 +382,13 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(4 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). - unsigned int old_value; + T old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); @@ -419,7 +423,7 @@ cmpxchg_post_membar(order); - return IntegerTypes::cast(old_value); + return old_value; } template<> @@ -428,11 +432,13 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(8 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). - long old_value; + T old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); @@ -467,7 +473,7 @@ cmpxchg_post_membar(order); - return IntegerTypes::cast(old_value); + return old_value; } #undef strasm_sync --- old/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp 2017-08-16 01:47:23.921178593 -0400 +++ new/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp 2017-08-16 01:47:23.809172841 -0400 @@ -478,6 +478,7 @@ // function is performed before the operand is fetched and again after the // operation is completed." +// No direct support for cmpxchg of bytes; emulate using int.
template<> struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; @@ -487,6 +488,7 @@ T volatile* dest, T cmp_val, cmpxchg_memory_order unused) const { + STATIC_ASSERT(4 == sizeof(T)); unsigned long old; __asm__ __volatile__ ( @@ -510,7 +512,8 @@ T volatile* dest, T cmp_val, cmpxchg_memory_order unused) const { - unsigned long old; + STATIC_ASSERT(8 == sizeof(T)); + T old; __asm__ __volatile__ ( " CSG %[old],%[upd],%[mem] \n\t" // Try to xchg upd with mem. @@ -524,7 +527,7 @@ : "cc" ); - return IntegerTypes::cast(old); + return old; } inline jlong Atomic::load(const volatile jlong* src) { return *src; } --- old/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp 2017-08-16 01:47:24.453205872 -0400 +++ new/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp 2017-08-16 01:47:24.341200132 -0400 @@ -131,6 +131,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) { + STATIC_ASSERT(4 == sizeof(T)); T rv; __asm__ volatile( " cas [%2], %3, %0" @@ -146,6 +147,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) { + STATIC_ASSERT(8 == sizeof(T)); T rv; __asm__ volatile( " casx [%2], %3, %0" --- old/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp 2017-08-16 01:47:24.977232759 -0400 +++ new/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp 2017-08-16 01:47:24.869227210 -0400 @@ -85,6 +85,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order /* order */) const { + STATIC_ASSERT(1 == sizeof(T)); __asm__ volatile ("lock cmpxchgb %1,(%3)" : "=a" (exchange_value) : "q" (exchange_value), "a" (compare_value), "r" (dest) @@ -98,6 +99,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order /* order */) const { + STATIC_ASSERT(4 == sizeof(T)); __asm__ volatile ("lock cmpxchgl %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) @@ -150,6 +152,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order /* order */) const { + STATIC_ASSERT(8 == sizeof(T)); __asm__ __volatile__ ("lock cmpxchgq %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) @@ -194,7 +197,8 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { - return cmpxchg_using_stub(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); + STATIC_ASSERT(8 == sizeof(T)); + return cmpxchg_using_helper(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); } inline jlong Atomic::load(const volatile jlong* src) { --- old/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp 2017-08-16 01:47:25.505259831 -0400 +++ new/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp 2017-08-16 01:47:25.393254081 -0400 @@ -271,11 +271,12 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) { + STATIC_ASSERT(4 == sizeof(T)); #ifdef ARM - return cmpxchg_using_stub(arm_compare_and_swap, exchange_value, dest, compare_value); + return cmpxchg_using_helper(arm_compare_and_swap, exchange_value, dest, compare_value); #else #ifdef M68K - return cmpxchg_using_stub(m68k_compare_and_swap, exchange_value, dest, compare_value); + return cmpxchg_using_helper(m68k_compare_and_swap, exchange_value, dest, compare_value); #else return __sync_val_compare_and_swap(dest, compare_value, exchange_value); #endif // M68K @@ -288,6 +289,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) { + STATIC_ASSERT(8 == sizeof(T)); return __sync_val_compare_and_swap(dest, compare_value, exchange_value); } --- old/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp 2017-08-16 01:47:26.033286922 -0400 +++ 
new/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp 2017-08-16 01:47:25.921281173 -0400 @@ -101,6 +101,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(4 == sizeof(T)); T rv; __asm__ volatile( " cas [%2], %3, %0" @@ -116,6 +117,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(8 == sizeof(T)); T rv; __asm__ volatile( " casx [%2], %3, %0" --- old/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp 2017-08-16 01:47:26.553313594 -0400 +++ new/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp 2017-08-16 01:47:26.441307847 -0400 @@ -68,7 +68,7 @@ return _Atomic_xchg(exchange_value, dest); } -// Not using cmpxchg_using_stub here, because some configurations of +// Not using cmpxchg_using_helper here, because some configurations of // Solaris compiler don't deal well with passing a "defined in .il" // function as an argument. We *should* switch to using gcc-style // inline assembly, but attempting to do so with Studio 12.4 ran into @@ -80,6 +80,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(1 == sizeof(T)); return IntegerTypes::cast( _Atomic_cmpxchg_byte(IntegerTypes::cast(exchange_value), reinterpret_cast(dest), @@ -92,6 +93,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(4 == sizeof(T)); return IntegerTypes::cast( _Atomic_cmpxchg(IntegerTypes::cast(exchange_value), reinterpret_cast(dest), @@ -104,6 +106,7 @@ T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(8 == sizeof(T)); return IntegerTypes::cast( _Atomic_cmpxchg_long(IntegerTypes::cast(exchange_value), reinterpret_cast(dest), --- old/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp 2017-08-16 01:47:27.077340467 -0400 +++ new/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp 2017-08-16 01:47:26.965334717 -0400 @@ -116,8 +116,9 @@ T volatile* dest, \ T compare_value, \ cmpxchg_memory_order order) const { \ - return cmpxchg_using_stub(StubName, exchange_value, dest, compare_value); \ -} + STATIC_ASSERT(ByteSize == sizeof(T)); \ + return cmpxchg_using_helper(StubName, exchange_value, dest, compare_value); \ + } DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func) DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func) --- old/src/share/vm/metaprogramming/integerTypes.hpp 2017-08-16 01:47:27.609367751 -0400 +++ new/src/share/vm/metaprogramming/integerTypes.hpp 2017-08-16 01:47:27.497362002 -0400 @@ -26,13 +26,12 @@ #define SHARE_VM_METAPROGRAMMING_INTEGERTYPES_HPP #include "memory/allocation.hpp" -#include "utilities/debug.hpp" #include "metaprogramming/enableIf.hpp" #include "metaprogramming/integralConstant.hpp" #include "metaprogramming/isFloatingPoint.hpp" -#include "metaprogramming/isPointer.hpp" -#include "metaprogramming/isRegisteredEnum.hpp" #include "metaprogramming/isIntegral.hpp" +#include "metaprogramming/isRegisteredEnum.hpp" +#include "utilities/debug.hpp" class IntegerTypes : public AllStatic { public: @@ -55,102 +54,113 @@ // same representation as x. template struct Translate : public FalseType {}; - // Value categories. For internal use, but needs to be public. 
- enum Category { - INTEGRAL, - ENUM, - FLOAT, - POINTER - }; - private: - template struct GetCategory; - template::value, - Category from_category = GetCategory::value> + typename Enable = void> struct Cast; - template static T cast_integral(U x); - template static T cast_floating_point(U x); + template static T cast_using_union(U x); }; -#define DEFINE_GET_CATEGORY(Predicate, Value) \ - template \ - struct IntegerTypes::GetCategory< \ - T, \ - typename EnableIf::value>::type> \ - : IntegralConstant \ - {}; - -DEFINE_GET_CATEGORY(IsIntegral, INTEGRAL) -DEFINE_GET_CATEGORY(IsRegisteredEnum, ENUM) -DEFINE_GET_CATEGORY(IsFloatingPoint, FLOAT) -DEFINE_GET_CATEGORY(IsPointer, POINTER) - -#undef DEFINE_GET_CATEGORY - -// Convert between different integral types of the same size. -// See C++03 3.10/15 for discussion of reinterpret_cast to a reference -// as a means for converting integral types while keeping the representation. -template -inline T IntegerTypes::cast_integral(U x) { - STATIC_ASSERT(sizeof(T) == sizeof(U)); - return reinterpret_cast(x); -} - -// Convert between an integral type and a floating point type of the -// same size. The only truly correct way to do this is with memcpy. -// Both the union trick and type punning via casts are undefined -// behavior. gcc generates exactly the same code for all three methods, -// except where the UB of type punning causes it to go off into the weeds. -// (gcc explicitly supports the union trick.) Other compilers may vary. -// In particular, not all compilers do a good job with the memcpy. +// Return an object of type T with the same value representation as x. +// +// T and U must be of the same size. It is expected that one of T and +// U is an integral type, and the other is an integral type, a +// (registered) enum type, or a floating point type +// +// This implementation uses the "union trick", which seems to be the +// best of a bad set of options. Though technically undefined +// behavior, it is widely and well supported, producing good code. In +// some cases, such as gcc, that support is explicitly documented. +// +// Using memcpy is the correct method, but some compilers produce +// wretched code for that method, even at maximal optimization levels. +// +// Using static_cast is only possible for integral and enum types, not +// for floating point types. And for integral and enum conversions, +// static_cast has unspecified or implementation-defined behavior for +// some cases. C++11 can be used to avoid most or all +// of those unspecified or implementation-defined issues, though that +// may require multi-step conversions. +// +// Using reinterpret_cast of references has undefined behavior for +// many cases, and there is much less empirical basis for its use, as +// compared to the union trick. template -inline T IntegerTypes::cast_floating_point(U x) { +inline T IntegerTypes::cast_using_union(U x) { STATIC_ASSERT(sizeof(T) == sizeof(U)); - T result; - memcpy(&result, &x, sizeof(x)); - return result; + union { T t; U u; }; + u = x; + return t; } ////////////////////////////////////////////////////////////////////////////// // cast(x) // -// Cast +// Cast // Give an informative error if the sizes differ. -template -struct IntegerTypes::Cast +template +struct IntegerTypes::Cast VALUE_OBJ_CLASS_SPEC { + STATIC_ASSERT(sizeof(T) == sizeof(U)); +}; + +// Conversion between integral types. 
+template +struct IntegerTypes::Cast< + T, U, true, + typename EnableIf::value && IsIntegral::value>::type> VALUE_OBJ_CLASS_SPEC { - STATIC_ASSERT(sizeof(T) == sizeof(U)); + T operator()(U x) const { return cast_using_union(x); } +}; + +// Convert an enum or floating point value to an integer value. +template +struct IntegerTypes::Cast< + T, U, true, + typename EnableIf::value && + (IsRegisteredEnum::value || + IsFloatingPoint::value)>::type> + VALUE_OBJ_CLASS_SPEC +{ + T operator()(U x) const { return cast_using_union(x); } }; -#define DEFINE_INTEGER_TYPES_CAST(To, From, Convert) \ - template \ - struct IntegerTypes::Cast \ - VALUE_OBJ_CLASS_SPEC \ - { \ - T operator()(U x) const { return Convert(x); } \ - }; - -DEFINE_INTEGER_TYPES_CAST(INTEGRAL, INTEGRAL, cast_integral) -DEFINE_INTEGER_TYPES_CAST(ENUM, INTEGRAL, cast_integral) -DEFINE_INTEGER_TYPES_CAST(INTEGRAL, ENUM, cast_integral) -DEFINE_INTEGER_TYPES_CAST(FLOAT, INTEGRAL, cast_floating_point) -DEFINE_INTEGER_TYPES_CAST(INTEGRAL, FLOAT, cast_floating_point) -DEFINE_INTEGER_TYPES_CAST(POINTER, INTEGRAL, reinterpret_cast) -DEFINE_INTEGER_TYPES_CAST(INTEGRAL, POINTER, reinterpret_cast) +// Convert an integer to an enum or floating point value. +template +struct IntegerTypes::Cast< + T, U, true, + typename EnableIf::value && + (IsRegisteredEnum::value || + IsFloatingPoint::value)>::type> + VALUE_OBJ_CLASS_SPEC +{ + T operator()(U x) const { return cast_using_union(x); } +}; -#undef DEFINE_INTEGER_TYPES_CAST +// Convert a pointer to an integral value. +template +struct IntegerTypes::Cast< + T, U*, true, + typename EnableIf::value>::type> + VALUE_OBJ_CLASS_SPEC +{ + T operator()(U* x) const { return reinterpret_cast(x); } +}; + +// Convert an integral value to a pointer. +template +struct IntegerTypes::Cast< + T*, U, true, + typename EnableIf::value>::type> + VALUE_OBJ_CLASS_SPEC +{ + T* operator()(U x) const { return reinterpret_cast(x); } +}; template inline T IntegerTypes::cast(U x) { --- old/src/share/vm/runtime/atomic.hpp 2017-08-16 01:47:28.145395241 -0400 +++ new/src/share/vm/runtime/atomic.hpp 2017-08-16 01:47:28.029389292 -0400 @@ -123,15 +123,26 @@ U compare_value, cmpxchg_memory_order order = memory_order_conservative); + // Performs atomic compare of *dest and NULL, and replaces *dest + // with exchange_value if the comparison succeeded. Returns true if + // the comparison succeeded and the exchange occurred. This is + // often used as part of lazy initialization, as a lock-free + // alternative to the Double-Checked Locking Pattern. 
template - inline static bool conditional_store_ptr(T* value, D* volatile* dest, - cmpxchg_memory_order order = memory_order_conservative); + inline static bool replace_if_null(T* value, D* volatile* dest, + cmpxchg_memory_order order = memory_order_conservative); - inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative) { + inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, + volatile intptr_t* dest, + intptr_t compare_value, + cmpxchg_memory_order order = memory_order_conservative) { return cmpxchg(exchange_value, dest, compare_value, order); } - inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order = memory_order_conservative) { + inline static void* cmpxchg_ptr(void* exchange_value, + volatile void* dest, + void* compare_value, + cmpxchg_memory_order order = memory_order_conservative) { return cmpxchg(exchange_value, reinterpret_cast(dest), compare_value, @@ -156,14 +167,14 @@ // of 1, 4, and 8 are required. The class is a function object that // must be default constructable, with these requirements: // - // - dest is of type D*. - // - exchange_value and compare_value are of type D. + // - dest is of type T*. + // - exchange_value and compare_value are of type T. // - order is of type cmpxchg_memory_order. - // - platform_cmpxchg is an object of type PlatformCmpxchg. + // - platform_cmpxchg is an object of type PlatformCmpxchg. // // Then - // platform_cmpxchg()(exchange_value, dest, compare_value, order) - // must be a valid expression, returning a result convertible to D. + // platform_cmpxchg(exchange_value, dest, compare_value, order) + // must be a valid expression, returning a result convertible to T. // // A default definition is provided, which declares a function template // T operator()(T, T volatile*, T, cmpxchg_memory_order) const @@ -178,24 +189,26 @@ // The generic arguments passed to PlatformCmpxchg need to be // translated to the appropriate type for the helper function, the // helper invoked on the translated arguments, and the result - // translated back. - template - static T cmpxchg_using_stub(StubFn stub_fn, - T exchange_value, - T volatile* dest, - T compare_value); - - // Support platforms that do not provide RMW byte-level atomic access - // To use, derive PlatformCmpxchg<1> from this class. - // Can't be private: C++03 11.4/2; fixed in C++11. -public: + // translated back. Type is the parameter / return type of the + // helper function. + template + static T cmpxchg_using_helper(Fn fn, + T exchange_value, + T volatile* dest, + T compare_value); + + // Support platforms that do not provide Read-Modify-Write + // byte-level atomic access. To use, derive PlatformCmpxchg<1> from + // this class. +public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11. struct CmpxchgByteUsingInt; private: }; template struct Atomic::IsPointerConvertible : AllStatic { - // Use the "sizeof trick" to test for convertibility. + // Determine whether From* is implicitly convertible to To*, using + // the "sizeof trick". typedef char yes; typedef char (&no)[2]; @@ -206,11 +219,11 @@ static const bool value = (sizeof(yes) == sizeof(test(test_value))); }; -// Define class before including platform file, which may specialize +// Define the class before including platform file, which may specialize // the operator definition. 
No generic definition of specializations // of the operator template are provided, nor are there any generic -// specializations of the class. That all needs to be provided by the -// platform file. +// specializations of the class. The platform file is responsible for +// providing those. template struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC { template @@ -220,9 +233,9 @@ cmpxchg_memory_order order) const; }; -// Define class before including platform file, which may use this as -// a base class, requiring it be complete. The operator template -// definition is defined later. +// Define the class before including platform file, which may use this +// as a base class, requiring it be complete. The definition is later +// in this file, near the other definitions related to cmpxchg. struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC { template T operator()(T exchange_value, @@ -263,8 +276,11 @@ } template -inline bool Atomic::conditional_store_ptr(T* value, D* volatile* dest, - cmpxchg_memory_order order) { +inline bool Atomic::replace_if_null(T* value, D* volatile* dest, + cmpxchg_memory_order order) { + // Presently using a trivial implementation in terms of cmpxchg. + // Consider adding platform support, to permit the use of compiler + // intrinsics like gcc's __sync_bool_compare_and_swap. D* expected_null = NULL; return expected_null == cmpxchg(value, dest, expected_null, order); } @@ -310,9 +326,8 @@ // Allow derived to base conversion, and adding cv-qualifiers. D* new_value = exchange_value; // Don't care what the CV qualifiers for compare_value are, - // but we need to match D when calling platform support. - typedef typename RemoveCV::type U_nocv; - D* old_value = const_cast(compare_value); + // but we need to match D* when calling platform support. + D* old_value = const_cast(compare_value); return PlatformCmpxchg()(new_value, dest, old_value, order); } }; @@ -343,16 +358,16 @@ } }; -template -inline T Atomic::cmpxchg_using_stub(StubFn stub_fn, - T exchange_value, - T volatile* dest, - T compare_value) { - STATIC_ASSERT(sizeof(StubType) == sizeof(T)); +template +inline T Atomic::cmpxchg_using_helper(Fn fn, + T exchange_value, + T volatile* dest, + T compare_value) { + STATIC_ASSERT(sizeof(Type) == sizeof(T)); return IntegerTypes::cast( - stub_fn(IntegerTypes::cast(exchange_value), - reinterpret_cast(dest), - IntegerTypes::cast(compare_value))); + fn(IntegerTypes::cast(exchange_value), + reinterpret_cast(dest), + IntegerTypes::cast(compare_value))); } template --- old/src/share/vm/utilities/bitMap.cpp 2017-08-16 01:47:28.685422942 -0400 +++ new/src/share/vm/utilities/bitMap.cpp 2017-08-16 01:47:28.569416985 -0400 @@ -617,7 +617,7 @@ return true; } -BitMap::idx_t* BitMap::_pop_count_table = NULL; +const BitMap::idx_t* BitMap::_pop_count_table = NULL; void BitMap::init_pop_count_table() { if (_pop_count_table == NULL) { @@ -626,7 +626,7 @@ table[i] = num_set_bits(i); } - if (!Atomic::conditional_store_ptr(table, &_pop_count_table)) { + if (!Atomic::replace_if_null(table, &_pop_count_table)) { guarantee(_pop_count_table != NULL, "invariant"); FREE_C_HEAP_ARRAY(idx_t, table); } --- old/src/share/vm/utilities/bitMap.hpp 2017-08-16 01:47:29.217450217 -0400 +++ new/src/share/vm/utilities/bitMap.hpp 2017-08-16 01:47:29.105444473 -0400 @@ -114,7 +114,7 @@ void verify_range(idx_t beg_index, idx_t end_index) const NOT_DEBUG_RETURN; // Statistics. 
- static idx_t* _pop_count_table; + static const idx_t* _pop_count_table; static void init_pop_count_table(); static idx_t num_set_bits(bm_word_t w); static idx_t num_set_bits_from_table(unsigned char c);
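
As an illustrative aside (not part of the patch): the integerTypes.hpp comments above argue for the "union trick" over memcpy, static_cast, and reinterpret_cast for same-size, value-preserving casts. Below is a minimal standalone sketch of that technique. The helper name and the runtime assert are stand-ins chosen for illustration only; the HotSpot version is IntegerTypes::cast_using_union, with the size requirement enforced at compile time by STATIC_ASSERT.

    #include <assert.h>
    #include <stdint.h>

    // Value-preserving cast between same-sized types via the "union trick".
    // Reading the inactive union member is technically undefined behavior,
    // but it is widely supported and, for gcc, explicitly documented.
    // HotSpot checks the size requirement with STATIC_ASSERT; a runtime
    // assert keeps this sketch self-contained.
    template<typename T, typename U>
    inline T cast_using_union(U x) {
      assert(sizeof(T) == sizeof(U));
      union { T t; U u; };
      u = x;       // write the source representation ...
      return t;    // ... and read it back as the destination type
    }

    int main() {
      // Round-trip a float through its 32-bit integer representation.
      float f = 1.5f;
      uint32_t bits = cast_using_union<uint32_t>(f);
      float g = cast_using_union<float>(bits);
      assert(f == g);
      return 0;
    }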