src/hotspot/share/runtime/atomic.hpp

*** 103,112 ****
--- 103,116 ----
    template<typename D, typename I>
    inline static D add(D volatile* dest, I add_value,
                        atomic_memory_order order = memory_order_conservative);
  
    template<typename D, typename I>
+   inline static D fetch_and_add(D volatile* dest, I add_value,
+                                 atomic_memory_order order = memory_order_conservative);
+ 
+   template<typename D, typename I>
    inline static D sub(D volatile* dest, I sub_value,
                        atomic_memory_order order = memory_order_conservative);
  
    // Atomically increment location. inc() provide:
    // <fence> increment-dest <membar StoreLoad|StoreStore>
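The new declaration gives callers a choice between the updated value (add) and the original value (fetch_and_add). A small usage sketch follows; the names are illustrative only and not part of this change:

  static volatile size_t _claimed = 0;

  // fetch_and_add returns the value of _claimed *before* the addition,
  // so concurrent callers each receive a distinct index starting at 0.
  size_t claim_next_index() {
    return Atomic::fetch_and_add(&_claimed, 1u);
  }

  // add returns the value *after* the addition, i.e. the running count.
  size_t bump_count() {
    return Atomic::add(&_claimed, 1u);
  }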
*** 219,231 ****
    // Give platforms a variation point to specialize.
    template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
    template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
  
  private:
!   // Dispatch handler for add. Provides type-based validity checking
!   // and limited conversions around calls to the platform-specific
!   // implementation layer provided by PlatformAdd.
    template<typename D, typename I, typename Enable = void>
    struct AddImpl;
  
    // Platform-specific implementation of add. Support for sizes of 4
    // bytes and (if different) pointer size bytes are required. The
--- 223,236 ----
    // Give platforms a variation point to specialize.
    template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
    template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
  
  private:
!   // Dispatch handler for add and fetch_and_add. Provides type-based
!   // validity checking and limited conversions around calls to the
!   // platform-specific implementation layer provided by
!   // PlatformAddAndFetch and PlatformFetchAndAdd.
    template<typename D, typename I, typename Enable = void>
    struct AddImpl;
  
    // Platform-specific implementation of add. Support for sizes of 4
    // bytes and (if different) pointer size bytes are required. The
*** 234,283 ****
    //
    // - dest is of type D*, an integral or pointer type.
    // - add_value is of type I, an integral type.
    // - sizeof(I) == sizeof(D).
    // - if D is an integral type, I == D.
!   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
    //
    // Then
    //   platform_add(dest, add_value)
    // must be a valid expression, returning a result convertible to D.
    //
    // No definition is provided; all platforms must explicitly define
    // this class and any needed specializations.
!   template<size_t byte_size> struct PlatformAdd;
  
!   // Helper base classes for defining PlatformAdd. To use, define
!   // PlatformAdd or a specialization that derives from one of these,
!   // and include in the PlatformAdd definition the support function
!   // (described below) required by the base class.
!   //
!   // These classes implement the required function object protocol for
!   // PlatformAdd, using a support function template provided by the
!   // derived class. Let add_value (of type I) and dest (of type D) be
!   // the arguments the object is called with. If D is a pointer type
!   // P*, then let addend (of type I) be add_value * sizeof(P);
!   // otherwise, addend is add_value.
!   //
!   // FetchAndAdd requires the derived class to provide
!   //   fetch_and_add(dest, addend)
!   // atomically adding addend to the value of dest, and returning the
!   // old value.
!   //
!   // AddAndFetch requires the derived class to provide
!   //   add_and_fetch(dest, addend)
!   // atomically adding addend to the value of dest, and returning the
!   // new value.
!   //
!   // When D is a pointer type P*, both fetch_and_add and add_and_fetch
!   // treat it as if it were a uintptr_t; they do not perform any
!   // scaling of the addend, as that has already been done by the
!   // caller.
! public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
!   template<typename Derived> struct FetchAndAdd;
!   template<typename Derived> struct AddAndFetch;
! private:
  
    // Support for platforms that implement some variants of add using a
    // (typically out of line) non-template helper function. The
    // generic arguments passed to PlatformAdd need to be translated to
    // the appropriate type for the helper function, the helper function
--- 239,261 ----
    //
    // - dest is of type D*, an integral or pointer type.
    // - add_value is of type I, an integral type.
    // - sizeof(I) == sizeof(D).
    // - if D is an integral type, I == D.
!   // - platform_add is an object of type PlatformAddAndFetch<sizeof(D)>.
    //
    // Then
    //   platform_add(dest, add_value)
    // must be a valid expression, returning a result convertible to D.
    //
    // No definition is provided; all platforms must explicitly define
    // this class and any needed specializations.
!   template<size_t byte_size> struct PlatformAddAndFetch;
  
!   // Platform-specific implementation of fetch_and_add.
!   // See comment for PlatformAddAndFetch for further specification.
!   template<size_t byte_size> struct PlatformFetchAndAdd;
  
    // Support for platforms that implement some variants of add using a
    // (typically out of line) non-template helper function. The
    // generic arguments passed to PlatformAdd need to be translated to
    // the appropriate type for the helper function, the helper function
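The function-object protocol above could be satisfied by a platform header roughly shaped like the sketch below. This is not taken from any real platform file: the actual headers use inline assembly or other primitives, handle pointer destinations without re-scaling, and map the order argument properly. The sketch only shows the shape of a definition, using GCC/Clang __atomic builtins for integral destinations and conservatively ignoring order in favor of sequential consistency:

  template<size_t byte_size>
  struct Atomic::PlatformAddAndFetch {
    // Returns the updated value; byte_size must match sizeof(D).
    template<typename D, typename I>
    D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
      STATIC_ASSERT(byte_size == sizeof(D));
      return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
    }
  };

  template<size_t byte_size>
  struct Atomic::PlatformFetchAndAdd {
    // Returns the previous value; byte_size must match sizeof(D).
    template<typename D, typename I>
    D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
      STATIC_ASSERT(byte_size == sizeof(D));
      return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
    }
  };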
*** 509,534 ****
      STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
      (void)const_cast<T&>(*dest = new_value);
    }
  };
  
- // Define FetchAndAdd and AddAndFetch helper classes before including
- // platform file, which may use these as base classes, requiring they
- // be complete.
- 
- template<typename Derived>
- struct Atomic::FetchAndAdd {
-   template<typename D, typename I>
-   D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
- };
- 
- template<typename Derived>
- struct Atomic::AddAndFetch {
-   template<typename D, typename I>
-   D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
- };
- 
  template<typename D>
  inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
    STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
    typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
    Atomic::add(dest, I(1), order);
--- 487,496 ----
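The retained inc() implementation simply forwards to add() with an addend of one, chosen as ptrdiff_t for pointer destinations so that the scaling in the add dispatch applies. An illustrative sketch, with made-up names that are not part of this change:

  static int _buffer[16];
  static int* volatile _cursor = _buffer;

  void advance_cursor() {
    // Equivalent to Atomic::add(&_cursor, (ptrdiff_t)1): the addend is later
    // scaled by sizeof(int), so _cursor advances by exactly one element.
    Atomic::inc(&_cursor);
  }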
*** 679,744 ****
  }
  
  template<typename D, typename I>
  inline D Atomic::add(D volatile* dest, I add_value,
                       atomic_memory_order order) {
!   return AddImpl<D, I>()(dest, add_value, order);
  }
  
  template<typename D, typename I>
  struct Atomic::AddImpl<
    D, I,
    typename EnableIf<IsIntegral<I>::value &&
                      IsIntegral<D>::value &&
                      (sizeof(I) <= sizeof(D)) &&
                      (IsSigned<I>::value == IsSigned<D>::value)>::type>
  {
!   D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
      D addend = add_value;
!     return PlatformAdd<sizeof(D)>()(dest, addend, order);
    }
  };
  
  template<typename P, typename I>
  struct Atomic::AddImpl<
    P*, I,
    typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
  {
!   P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
!     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
!     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
!     typedef typename Conditional<IsSigned<I>::value,
!                                  intptr_t,
!                                  uintptr_t>::type CI;
!     CI addend = add_value;
!     return PlatformAdd<sizeof(P*)>()(dest, addend, order);
!   }
! };
  
! template<typename Derived>
! template<typename D, typename I>
! inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
!                                                   atomic_memory_order order) const {
!   I addend = add_value;
!   // If D is a pointer type P*, scale by sizeof(P).
!   if (IsPointer<D>::value) {
!     addend *= sizeof(typename RemovePointer<D>::type);
  }
-   D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
-   return old + add_value;
- }
  
! template<typename Derived>
! template<typename D, typename I>
! inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
!                                                   atomic_memory_order order) const {
!   // If D is a pointer type P*, scale by sizeof(P).
!   if (IsPointer<D>::value) {
!     add_value *= sizeof(typename RemovePointer<D>::type);
  }
!   return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
! }
  
  template<typename Type, typename Fn, typename D, typename I>
  inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
    return PrimitiveConversions::cast<D>(
      fn(PrimitiveConversions::cast<Type>(add_value),
--- 641,701 ----
  }
  
  template<typename D, typename I>
  inline D Atomic::add(D volatile* dest, I add_value,
                       atomic_memory_order order) {
!   return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
! }
! 
! template<typename D, typename I>
! inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
!                                atomic_memory_order order) {
!   return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
  }
  
  template<typename D, typename I>
  struct Atomic::AddImpl<
    D, I,
    typename EnableIf<IsIntegral<I>::value &&
                      IsIntegral<D>::value &&
                      (sizeof(I) <= sizeof(D)) &&
                      (IsSigned<I>::value == IsSigned<D>::value)>::type>
  {
!   static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
!     D addend = add_value;
!     return PlatformAddAndFetch<sizeof(D)>()(dest, addend, order);
!   }
!   static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
      D addend = add_value;
!     return PlatformFetchAndAdd<sizeof(D)>()(dest, addend, order);
    }
  };
  
  template<typename P, typename I>
  struct Atomic::AddImpl<
    P*, I,
    typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
  {
!   STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
!   STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
!   typedef typename Conditional<IsSigned<I>::value,
!                                intptr_t,
!                                uintptr_t>::type CI;
!   static I scale_addend(I add_value) {
!     return add_value * sizeof(P);
  }
! 
!   static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
!     CI addend = add_value;
!     return PlatformAddAndFetch<sizeof(P*)>()(dest, scale_addend(addend), order);
  }
!   static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
!     CI addend = add_value;
!     return PlatformFetchAndAdd<sizeof(P*)>()(dest, scale_addend(addend), order);
!   }
! };
  
  template<typename Type, typename Fn, typename D, typename I>
  inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
    return PrimitiveConversions::cast<D>(
      fn(PrimitiveConversions::cast<Type>(add_value),
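With the dispatch split into static add_and_fetch/fetch_and_add members, pointer destinations go through scale_addend(), so callers keep working in element units while the platform layer sees a byte-scaled addend. A rough usage sketch with illustrative names only (bounds and overflow checks omitted):

  static HeapWord* volatile _top;

  HeapWord* par_allocate(size_t word_size) {
    // Returns the previous value of _top and advances it by word_size
    // HeapWords; the dispatch above multiplies the addend by
    // sizeof(HeapWord) before handing it to PlatformFetchAndAdd.
    return Atomic::fetch_and_add(&_top, word_size);
  }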