--- old/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	2019-12-15 23:51:30.864810262 +0100
+++ new/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	2019-12-15 23:51:30.636806520 +0100
@@ -28,17 +28,23 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformFetchAndAdd {
   template<typename D, typename I>
-  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformAddAndFetch {
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return Atomic::PlatformFetchAndAdd<byte_size>()(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
 template<typename D, typename I>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
-                                               atomic_memory_order order) const {
+inline D Atomic::PlatformFetchAndAdd<4>::operator()(D volatile* dest, I add_value,
+                                                    atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
@@ -94,8 +100,8 @@
 
 template<>
 template<typename D, typename I>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
-                                               atomic_memory_order order) const {
+inline D Atomic::PlatformFetchAndAdd<8>::operator()(D volatile* dest, I add_value,
+                                                    atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
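Note (illustration, not part of the patch): the relationship the linux_x86 hunks encode between the two platform hooks can be shown with a small standalone sketch using std::atomic instead of the HotSpot classes; lock xaddl/xaddq give back the old value, so the new value is derived by adding add_value once more, exactly as PlatformAddAndFetch does above.

    // Standalone illustration only, not HotSpot code.
    #include <atomic>
    #include <cassert>

    int fetch_and_add(std::atomic<int>* dest, int add_value) {
      return dest->fetch_add(add_value);                   // returns the old value
    }

    int add_and_fetch(std::atomic<int>* dest, int add_value) {
      return fetch_and_add(dest, add_value) + add_value;   // returns the new value
    }

    int main() {
      std::atomic<int> v(10);
      assert(fetch_and_add(&v, 5) == 10);  // value before the add
      assert(add_and_fetch(&v, 5) == 20);  // value after the add
      return 0;
    }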
--- old/src/hotspot/share/gc/z/zArray.inline.hpp	2019-12-15 23:51:31.236816367 +0100
+++ new/src/hotspot/share/gc/z/zArray.inline.hpp	2019-12-15 23:51:31.032813019 +0100
@@ -101,7 +101,7 @@
 template <typename T, bool parallel>
 inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
   if (parallel) {
-    const size_t next = Atomic::add(&_next, 1u) - 1u;
+    const size_t next = Atomic::fetch_and_add(&_next, 1u);
     if (next < _array->size()) {
       *elem = _array->at(next);
       return true;
--- old/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	2019-12-15 23:51:31.652823194 +0100
+++ new/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	2019-12-15 23:51:31.400819058 +0100
@@ -110,7 +110,7 @@
 
   // Increment top before end to make sure another
   // thread can't steal out newly expanded space.
-  addr = Atomic::add(&_top, size) - size;
+  addr = Atomic::fetch_and_add(&_top, size);
   Atomic::add(&_end, expand_size);
 
   return addr;
--- old/src/hotspot/share/gc/z/zNMethodTableIteration.cpp	2019-12-15 23:51:32.076830152 +0100
+++ new/src/hotspot/share/gc/z/zNMethodTableIteration.cpp	2019-12-15 23:51:31.824826016 +0100
@@ -58,7 +58,7 @@
     // Claim table partition. Each partition is currently sized to span
     // two cache lines. This number is just a guess, but seems to work well.
     const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size);
+    const size_t partition_start = MIN2(Atomic::fetch_and_add(&_claimed, partition_size), _size);
     const size_t partition_end = MIN2(partition_start + partition_size, _size);
     if (partition_start == partition_end) {
       // End of table
--- old/src/hotspot/share/gc/z/zPageAllocator.cpp	2019-12-15 23:51:32.504837176 +0100
+++ new/src/hotspot/share/gc/z/zPageAllocator.cpp	2019-12-15 23:51:32.248832974 +0100
@@ -176,7 +176,7 @@
   for (;;) {
     // Get granule offset
    const size_t size = ZGranuleSize;
-    const uintptr_t offset = Atomic::add(&_start, size) - size;
+    const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
     if (offset >= _end) {
       // Done
       break;
--- old/src/hotspot/share/gc/z/zRelocationSet.inline.hpp	2019-12-15 23:51:32.948844462 +0100
+++ new/src/hotspot/share/gc/z/zRelocationSet.inline.hpp	2019-12-15 23:51:32.700840393 +0100
@@ -38,7 +38,7 @@
 
   if (parallel) {
     if (_next < nforwardings) {
-      const size_t next = Atomic::add(&_next, 1u) - 1u;
+      const size_t next = Atomic::fetch_and_add(&_next, 1u);
       if (next < nforwardings) {
         *forwarding = _relocation_set->_forwardings[next];
         return true;
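Note (illustration, not part of the patch): every ZGC change above is the same claim-an-index pattern. The old code asked Atomic::add for the new counter value and subtracted the increment again; the new code asks for the pre-increment value directly. A standalone sketch of the two forms, with stand-ins for the two Atomic entry points:

    // Standalone sketch using std::atomic, not HotSpot code.
    #include <atomic>
    #include <cstddef>

    static size_t add(std::atomic<size_t>* dest, size_t v) {
      return dest->fetch_add(v) + v;     // new value, like Atomic::add
    }
    static size_t fetch_and_add(std::atomic<size_t>* dest, size_t v) {
      return dest->fetch_add(v);         // old value, like Atomic::fetch_and_add
    }

    static std::atomic<size_t> _next(0);

    size_t claim_before() { return add(&_next, 1) - 1; }        // old call-site pattern
    size_t claim_after()  { return fetch_and_add(&_next, 1); }  // new call-site pattern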
--- old/src/hotspot/share/runtime/atomic.hpp	2019-12-15 23:51:33.376851486 +0100
+++ new/src/hotspot/share/runtime/atomic.hpp	2019-12-15 23:51:33.116847219 +0100
@@ -105,6 +105,10 @@
                       atomic_memory_order order = memory_order_conservative);
 
   template<typename D, typename I>
+  inline static D fetch_and_add(D volatile* dest, I add_value,
+                                atomic_memory_order order = memory_order_conservative);
+
+  template<typename D, typename I>
   inline static D sub(D volatile* dest, I sub_value,
                       atomic_memory_order order = memory_order_conservative);
 
@@ -221,9 +225,10 @@
   template<size_t byte_size, ScopedFenceType type>
   struct PlatformOrderedLoad;
 
 private:
-  // Dispatch handler for add.  Provides type-based validity checking
-  // and limited conversions around calls to the platform-specific
-  // implementation layer provided by PlatformAdd.
+  // Dispatch handler for add and fetch_and_add.  Provides type-based
+  // validity checking and limited conversions around calls to the
+  // platform-specific implementation layer provided by
+  // PlatformAddAndFetch and PlatformFetchAndAdd.
   template<typename D, typename I, typename Enable = void>
   struct AddImpl;
 
@@ -236,7 +241,7 @@
   // - add_value is of type I, an integral type.
   // - sizeof(I) == sizeof(D).
   // - if D is an integral type, I == D.
-  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
+  // - platform_add is an object of type PlatformAddAndFetch<sizeof(D)>.
   //
   // Then
   //   platform_add(dest, add_value)
@@ -244,38 +249,11 @@
   //
   // No definition is provided; all platforms must explicitly define
   // this class and any needed specializations.
-  template<size_t byte_size> struct PlatformAdd;
+  template<size_t byte_size> struct PlatformAddAndFetch;
 
-  // Helper base classes for defining PlatformAdd.  To use, define
-  // PlatformAdd or a specialization that derives from one of these,
-  // and include in the PlatformAdd definition the support function
-  // (described below) required by the base class.
-  //
-  // These classes implement the required function object protocol for
-  // PlatformAdd, using a support function template provided by the
-  // derived class.  Let add_value (of type I) and dest (of type D) be
-  // the arguments the object is called with.  If D is a pointer type
-  // P*, then let addend (of type I) be add_value * sizeof(P);
-  // otherwise, addend is add_value.
-  //
-  // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // old value.
-  //
-  // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // new value.
-  //
-  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
-  // treat it as if it were a uintptr_t; they do not perform any
-  // scaling of the addend, as that has already been done by the
-  // caller.
-public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
-  template<typename Derived> struct FetchAndAdd;
-  template<typename Derived> struct AddAndFetch;
-private:
+  // Platform-specific implementation of fetch_and_add.
+  // See comment for PlatformAddAndFetch for further specification.
+  template<size_t byte_size> struct PlatformFetchAndAdd;
 
   // Support for platforms that implement some variants of add using a
   // (typically out of line) non-template helper function.  The
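Note (illustration, not part of the patch): with the helper base classes gone, a port now defines the two hooks directly. A hypothetical gcc-builtin-based definition is sketched below; the real definitions live in the os_cpu headers, for example the linux_x86 hunks earlier in this diff, and a port may derive PlatformAddAndFetch from PlatformFetchAndAdd exactly as that file does.

    // Hypothetical port sketch only, not proposed by this patch.
    template<size_t byte_size>
    struct Atomic::PlatformFetchAndAdd {
      template<typename D, typename I>
      D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
        STATIC_ASSERT(byte_size == sizeof(D));
        // Memory ordering simplified to seq_cst for the sketch; returns the
        // value *dest held before the addition.
        return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
      }
    };

    template<size_t byte_size>
    struct Atomic::PlatformAddAndFetch {
      template<typename D, typename I>
      D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
        // Derive the new value from the old one, as the linux_x86 hunk does.
        return Atomic::PlatformFetchAndAdd<byte_size>()(dest, add_value, order) + add_value;
      }
    };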
@@ -511,22 +489,6 @@
   }
 };
 
-// Define FetchAndAdd and AddAndFetch helper classes before including
-// platform file, which may use these as base classes, requiring they
-// be complete.
-
-template<typename Derived>
-struct Atomic::FetchAndAdd {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
-template<typename Derived>
-struct Atomic::AddAndFetch {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
@@ -681,7 +643,13 @@
 template<typename D, typename I>
 inline D Atomic::add(D volatile* dest, I add_value,
                      atomic_memory_order order) {
-  return AddImpl<D, I>()(dest, add_value, order);
+  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
+}
+
+template<typename D, typename I>
+inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
+                               atomic_memory_order order) {
+  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
 }
 
 template<typename D, typename I>
@@ -692,9 +660,13 @@
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
+    D addend = add_value;
+    return PlatformAddAndFetch<sizeof(D)>()(dest, addend, order);
+  }
+  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(dest, addend, order);
+    return PlatformFetchAndAdd<sizeof(D)>()(dest, addend, order);
   }
 };
 
@@ -703,40 +675,25 @@
   P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
-    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
-    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
-    typedef typename Conditional<IsSigned<I>::value,
-                                 intptr_t,
-                                 uintptr_t>::type CI;
-    CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
-  }
-};
+  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
+  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
+  typedef typename Conditional<IsSigned<I>::value,
+                               intptr_t,
+                               uintptr_t>::type CI;
 
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  I addend = add_value;
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    addend *= sizeof(typename RemovePointer<D>::type);
+  static I scale_addend(I add_value) {
+    return add_value * sizeof(P);
   }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
-  return old + add_value;
-}
 
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    add_value *= sizeof(typename RemovePointer<D>::type);
+  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
+    CI addend = add_value;
+    return PlatformAddAndFetch<sizeof(P*)>()(dest, scale_addend(addend), order);
   }
-  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
-}
+  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
+    CI addend = add_value;
+    return PlatformFetchAndAdd<sizeof(P*)>()(dest, scale_addend(addend), order);
+  }
+};
 
 template<typename Type, typename Fn, typename D, typename I>
 inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
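Note (illustration, not part of the patch): after the changes above the two entry points dispatch through AddImpl to the matching platform hook and differ only in which value they return; for pointers, AddImpl scales the addend by the element size before the platform hook sees it.

    // Dispatch, as established by the hunks above:
    //   Atomic::add(dest, v)           -> AddImpl<D, I>::add_and_fetch -> PlatformAddAndFetch<sizeof(D)>
    //   Atomic::fetch_and_add(dest, v) -> AddImpl<D, I>::fetch_and_add -> PlatformFetchAndAdd<sizeof(D)>
    //
    // Expected values for a fresh counter (sketch; assumes runtime/atomic.hpp):
    volatile size_t claimed = 0;
    const size_t old_value = Atomic::fetch_and_add(&claimed, 8u); // old_value == 0,  claimed == 8
    const size_t new_value = Atomic::add(&claimed, 8u);           // new_value == 16, claimed == 16
    //
    // For a pointer destination, fetch_and_add advances by whole elements
    // (scale_addend multiplies by sizeof(P)) and returns the previous pointer.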