
src/hotspot/share/runtime/atomic.hpp


        

@@ -77,17 +77,17 @@
   // Atomically store to a location
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static void store(T store_value, volatile D* dest);
+  template<typename D, typename T>
+  inline static void store(volatile D* dest, T store_value);
 
-  template <typename T, typename D>
+  template <typename D, typename T>
   inline static void release_store(volatile D* dest, T store_value);
 
-  template <typename T, typename D>
+  template <typename D, typename T>
   inline static void release_store_fence(volatile D* dest, T store_value);
 
   // Atomically load from a location
   // The type T must be either a pointer type, an integral/enum type,
   // or a type that is primitive convertible using PrimitiveConversions.
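
For illustration, a sketch of call sites under the new order (the `_state` field and stored values are hypothetical, not from this patch). Note that store() previously took (store_value, dest), while release_store*() already took dest first; after this change the template parameter order matches the argument order everywhere:

    volatile int _state = 0;  // hypothetical field

    void example_stores() {
      Atomic::store(&_state, 1);                // relaxed atomic store
      Atomic::release_store(&_state, 2);        // store with release semantics
      Atomic::release_store_fence(&_state, 3);  // release store followed by a fence
    }
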
@@ -98,16 +98,16 @@
   inline static T load_acquire(const volatile T* dest);
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
-  template<typename I, typename D>
-  inline static D add(I add_value, D volatile* dest,
-                      atomic_memory_order order = memory_order_conservative);
+  template<typename D, typename I>
+  inline static D add(D volatile* dest, I add_value,
+                      atomic_memory_order order = memory_order_conservative);
 
-  template<typename I, typename D>
-  inline static D sub(I sub_value, D volatile* dest,
-                      atomic_memory_order order = memory_order_conservative);
+  template<typename D, typename I>
+  inline static D sub(D volatile* dest, I sub_value,
+                      atomic_memory_order order = memory_order_conservative);
 
   // Atomically increment location. inc() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
   // The type D may be either a pointer type, or an integral
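
A hypothetical call site under the new order (the `_counter` field is illustrative; recall that I must match D's signedness and satisfy sizeof(I) <= sizeof(D)):

    volatile size_t _counter = 0;  // hypothetical field

    void example_add_sub() {
      size_t v1 = Atomic::add(&_counter, (size_t)2);  // returns the updated value
      size_t v2 = Atomic::sub(&_counter, (size_t)1);
      Atomic::inc(&_counter);  // now forwards to add(dest, I(1), order)
      Atomic::dec(&_counter);  // now forwards to add(dest, I(-1), order)
    }
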
@@ -130,32 +130,32 @@
   // prior value of *dest. xchg*() provide:
   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static D xchg(T exchange_value, volatile D* dest,
+  template<typename D, typename T>
+  inline static D xchg(volatile D* dest, T exchange_value,
                        atomic_memory_order order = memory_order_conservative);
 
   // Performs atomic compare of *dest and compare_value, and exchanges
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-  template<typename T, typename D, typename U>
-  inline static D cmpxchg(T exchange_value,
-                          D volatile* dest,
-                          U compare_value,
+  template<typename D, typename U, typename T>
+  inline static D cmpxchg(D volatile* dest,
+                          U compare_value,
+                          T exchange_value,
                           atomic_memory_order order = memory_order_conservative);
 
   // Performs atomic compare of *dest and NULL, and replaces *dest
   // with exchange_value if the comparison succeeded. Returns true if
   // the comparison succeeded and the exchange occurred. This is
   // often used as part of lazy initialization, as a lock-free
   // alternative to the Double-Checked Locking Pattern.
-  template<typename T, typename D>
-  inline static bool replace_if_null(T* value, D* volatile* dest,
+  template<typename D, typename T>
+  inline static bool replace_if_null(D* volatile* dest, T* value,
                                      atomic_memory_order order = memory_order_conservative);
 
 private:
 WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
   // Test whether From is implicitly convertible to To.
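
The new cmpxchg order reads as "at dest, replace compare_value with exchange_value". A hypothetical call site (the `_state` field is illustrative):

    volatile int _state = 0;  // hypothetical field

    bool try_begin() {
      // cmpxchg(dest, compare_value, exchange_value) returns the prior
      // value of *dest; equality with compare_value signals success.
      int prior = Atomic::cmpxchg(&_state, 0, 1);
      return prior == 0;
    }
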
@@ -166,11 +166,11 @@
 protected:
   // Dispatch handler for store. Provides type-based validity
   // checking and limited conversions around calls to the platform-
   // specific implementation layer provided by PlatformOp.
-  template<typename T, typename D, typename PlatformOp, typename Enable = void>
+  template<typename D, typename T, typename PlatformOp, typename Enable = void>
   struct StoreImpl;
 
   // Platform-specific implementation of store. Support for sizes
   // of 1, 2, 4, and (if different) pointer size bytes are required.
   // The class is a function object that must be default constructable,
@@ -222,11 +222,11 @@
 private:
   // Dispatch handler for add. Provides type-based validity checking
   // and limited conversions around calls to the platform-specific
   // implementation layer provided by PlatformAdd.
-  template<typename I, typename D, typename Enable = void>
+  template<typename D, typename I, typename Enable = void>
   struct AddImpl;
 
   // Platform-specific implementation of add. Support for sizes of 4
   // bytes and (if different) pointer size bytes are required. The
   // class is a function object that must be default constructable,
@@ -237,11 +237,11 @@
   // - sizeof(I) == sizeof(D).
   // - if D is an integral type, I == D.
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
   // Then
-  //   platform_add(add_value, dest)
+  //   platform_add(dest, add_value)
   // must be a valid expression, returning a result convertible to D.
   //
   // No definition is provided; all platforms must explicitly define
   // this class and any needed specializations.
   template<size_t byte_size> struct PlatformAdd;
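
For context, the shape a platform file typically provides under the new order looks roughly like this (a sketch only; the actual specializations live in the per-platform atomic headers, where fetch_and_add is implemented with platform intrinsics or assembly):

    template<>
    struct Atomic::PlatformAdd<4>
      : Atomic::FetchAndAdd<Atomic::PlatformAdd<4> >
    {
      // fetch_and_add(dest, addend) is the support function the
      // FetchAndAdd base class requires; see below.
      template<typename D, typename I>
      D fetch_and_add(D volatile* dest, I add_value,
                      atomic_memory_order order) const;
    };
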
@@ -257,16 +257,16 @@
   // the arguments the object is called with. If D is a pointer type
   // P*, then let addend (of type I) be add_value * sizeof(P);
   // otherwise, addend is add_value.
   //
   // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(addend, dest)
+  //   fetch_and_add(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // old value.
   //
   // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(addend, dest)
+  //   add_and_fetch(dest, addend)
   // atomically adding addend to the value of dest, and returning the
   // new value.
   //
   // When D is a pointer type P*, both fetch_and_add and add_and_fetch
   // treat it as if it were a uintptr_t; they do not perform any
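
As a reading aid, a non-atomic model of the two protocols (illustrative only, not part of the patch):

    // Models the value each protocol returns; the real support
    // functions perform the read-modify-write atomically.
    inline int fetch_and_add_model(int* dest, int addend) {
      int old = *dest;
      *dest = old + addend;
      return old;            // FetchAndAdd: value before the addition
    }

    inline int add_and_fetch_model(int* dest, int addend) {
      *dest += addend;
      return *dest;          // AddAndFetch: value after the addition
    }
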
@@ -284,18 +284,18 @@
   // invoked on the translated arguments, and the result translated
   // back. Type is the parameter / return type of the helper
   // function. No scaling of add_value is performed when D is a pointer
   // type, so this function can be used to implement the support function
   // required by AddAndFetch.
-  template<typename Type, typename Fn, typename I, typename D>
-  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+  template<typename Type, typename Fn, typename D, typename I>
+  static D add_using_helper(Fn fn, D volatile* dest, I add_value);
 
   // Dispatch handler for cmpxchg. Provides type-based validity
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
   // PlatformCmpxchg.
-  template<typename T, typename D, typename U, typename Enable = void>
+  template<typename D, typename U, typename T, typename Enable = void>
   struct CmpxchgImpl;
 
   // Platform-specific implementation of cmpxchg. Support for sizes
   // of 1, 4, and 8 are required. The class is a function object that
   // must be default constructable, with these requirements:
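
Note: the *_using_helper functions still invoke the underlying stub fn with the old argument order (value first, then dest); only the Atomic-facing parameter order changes in this patch. The comment added to xchg_using_helper near the end of the file makes the same point.
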
@@ -304,15 +304,15 @@
   // - exchange_value and compare_value are of type T.
   // - order is of type atomic_memory_order.
   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
   //
   // Then
-  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
+  //   platform_cmpxchg(dest, compare_value, exchange_value, order)
   // must be a valid expression, returning a result convertible to T.
   //
   // A default definition is provided, which declares a function template
-  //   T operator()(T, T volatile*, T, atomic_memory_order) const
+  //   T operator()(T volatile*, T, T, atomic_memory_order) const
   //
   // For each required size, a platform must either provide an
   // appropriate definition of that function, or must entirely
   // specialize the class template for that size.
   template<size_t byte_size> struct PlatformCmpxchg;
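
A minimal conforming specialization could look like the following sketch, assuming a gcc-style toolchain with __atomic builtins (real ports use their own intrinsics or assembly, and map `order` to weaker orderings where permitted):

    template<>
    struct Atomic::PlatformCmpxchg<4> {
      template<typename T>
      T operator()(T volatile* dest,
                   T compare_value,
                   T exchange_value,
                   atomic_memory_order order) const {
        T expected = compare_value;
        // On failure the builtin writes the current value of *dest back
        // into 'expected', so 'expected' holds the prior value either way.
        __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                    false /* strong */,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;
      }
    };
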
@@ -324,13 +324,13 @@
   // helper invoked on the translated arguments, and the result
   // translated back. Type is the parameter / return type of the
   // helper function.
   template<typename Type, typename Fn, typename T>
   static T cmpxchg_using_helper(Fn fn,
-                                T exchange_value,
                                 T volatile* dest,
-                                T compare_value);
+                                T compare_value,
+                                T exchange_value);
 
   // Support platforms that do not provide Read-Modify-Write
   // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
   // this class.
 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
@@ -339,11 +339,11 @@
   // Dispatch handler for xchg. Provides type-based validity
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
   // PlatformXchg.
-  template<typename T, typename D, typename Enable = void>
+  template<typename D, typename T, typename Enable = void>
   struct XchgImpl;
 
   // Platform-specific implementation of xchg. Support for sizes
   // of 4, and sizeof(intptr_t) are required. The class is a function
   // object that must be default constructable, with these requirements:
@@ -351,15 +351,15 @@
   // - dest is of type T*.
   // - exchange_value is of type T.
   // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
   //
   // Then
-  //   platform_xchg(exchange_value, dest)
+  //   platform_xchg(dest, exchange_value)
   // must be a valid expression, returning a result convertible to T.
   //
   // A default definition is provided, which declares a function template
-  //   T operator()(T, T volatile*, T, atomic_memory_order) const
+  //   T operator()(T volatile*, T, atomic_memory_order) const
   //
   // For each required size, a platform must either provide an
   // appropriate definition of that function, or must entirely
   // specialize the class template for that size.
   template<size_t byte_size> struct PlatformXchg;
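
Similarly, a sketch of a conforming PlatformXchg specialization under the new dest-first order, again assuming gcc __atomic builtins (illustrative only):

    template<>
    struct Atomic::PlatformXchg<4> {
      template<typename T>
      T operator()(T volatile* dest,
                   T exchange_value,
                   atomic_memory_order order) const {
        // Returns the prior value of *dest; 'order' is conservatively
        // strengthened to SEQ_CST in this sketch.
        return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
      }
    };
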
@@ -371,12 +371,12 @@
   // helper invoked on the translated arguments, and the result
   // translated back. Type is the parameter / return type of the
   // helper function.
   template<typename Type, typename Fn, typename T>
   static T xchg_using_helper(Fn fn,
-                             T exchange_value,
-                             T volatile* dest);
+                             T volatile* dest,
+                             T exchange_value);
 };
 
 template<typename From, typename To>
 struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
   // Determine whether From* is implicitly convertible to To*, using
@@ -448,31 +448,31 @@
 struct Atomic::StoreImpl<
   T, T,
   PlatformOp,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     // Forward to the platform handler for the size of T.
-    PlatformOp()(new_value, dest);
+    PlatformOp()(dest, new_value);
   }
 };
 
 // Handle store for pointer types.
 //
 // The new_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // new_value in the destination.
-template<typename T, typename D, typename PlatformOp>
+template<typename D, typename T, typename PlatformOp>
 struct Atomic::StoreImpl<
-  T*, D*,
+  D*, T*,
   PlatformOp,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  void operator()(T* new_value, D* volatile* dest) const {
+  void operator()(D* volatile* dest, T* new_value) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* value = new_value;
-    PlatformOp()(value, dest);
+    PlatformOp()(dest, value);
   }
 };
 
 // Handle store for types that have a translator.
 //
@@ -484,16 +484,16 @@
 struct Atomic::StoreImpl<
   T, T,
   PlatformOp,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
-    PlatformOp()(Translator::decay(new_value),
-                 reinterpret_cast<Decayed volatile*>(dest));
+    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
+                 Translator::decay(new_value));
   }
 };
 
 // Default implementation of atomic store if a specific platform
 // does not provide a specialization for a certain size class.
@@ -502,12 +502,12 @@
 // supports wide atomics, then it has to use specialization
 // of Atomic::PlatformStore for that wider size class.
 template<size_t byte_size>
 struct Atomic::PlatformStore {
   template<typename T>
-  void operator()(T new_value,
-                  T volatile* dest) const {
+  void operator()(T volatile* dest,
+                  T new_value) const {
     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
     (void)const_cast<T&>(*dest = new_value);
   }
 };
@@ -515,38 +515,38 @@
 // platform file, which may use these as base classes, requiring they
 // be complete.
 template<typename Derived>
 struct Atomic::FetchAndAdd {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename Derived>
 struct Atomic::AddAndFetch {
-  template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
-  Atomic::add(I(1), dest, order);
+  Atomic::add(dest, I(1), order);
 }
 
 template<typename D>
 inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
   // Assumes two's complement integer representation.
   #pragma warning(suppress: 4146)
-  Atomic::add(I(-1), dest, order);
+  Atomic::add(dest, I(-1), order);
 }
 
-template<typename I, typename D>
-inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
+template<typename D, typename I>
+inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
   STATIC_ASSERT(IsIntegral<I>::value);
   // If D is a pointer type, use [u]intptr_t as the addend type,
   // matching signedness of I. Otherwise, use D as the addend type.
   typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
@@ -555,35 +555,35 @@
   STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
   STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
   AddendType addend = sub_value;
   // Assumes two's complement integer representation.
   #pragma warning(suppress: 4146) // In case AddendType is not signed.
-  return Atomic::add(-addend, dest, order);
+  return Atomic::add(dest, -addend, order);
 }
 
 // Define the class before including platform file, which may specialize
 // the operator definition. No generic definition of specializations
 // of the operator template are provided, nor are there any generic
 // specializations of the class. The platform file is responsible for
 // providing those.
 template<size_t byte_size>
 struct Atomic::PlatformCmpxchg {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
                T compare_value,
+               T exchange_value,
                atomic_memory_order order) const;
 };
 
 // Define the class before including platform file, which may use this
 // as a base class, requiring it be complete. The definition is later
 // in this file, near the other definitions related to cmpxchg.
 struct Atomic::CmpxchgByteUsingInt {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
                T compare_value,
+               T exchange_value,
               atomic_memory_order order) const;
 };
 
 // Define the class before including platform file, which may specialize
 // the operator definition. No generic definition of specializations
@@ -591,12 +591,12 @@
 // specializations of the class. The platform file is responsible for
 // providing those.
 template<size_t byte_size>
 struct Atomic::PlatformXchg {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
+               T exchange_value,
                atomic_memory_order order) const;
 };
 
 template <ScopedFenceType T>
 class ScopedFenceGeneral: public StackObj {
@@ -652,133 +652,133 @@
 template <typename T>
 inline T Atomic::load_acquire(const volatile T* p) {
   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 }
 
-template<typename T, typename D>
-inline void Atomic::store(T store_value, volatile D* dest) {
-  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
+template<typename D, typename T>
+inline void Atomic::store(volatile D* dest, T store_value) {
+  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
 }
 
 template<size_t byte_size, ScopedFenceType type>
 struct Atomic::PlatformOrderedStore {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     ScopedFence<type> f((void*)p);
-    Atomic::store(v, p);
+    Atomic::store(p, v);
   }
 };
 
-template <typename T, typename D>
+template <typename D, typename T>
 inline void Atomic::release_store(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 }
 
-template <typename T, typename D>
+template <typename D, typename T>
 inline void Atomic::release_store_fence(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 }
 
-template<typename I, typename D>
-inline D Atomic::add(I add_value, D volatile* dest, atomic_memory_order order) {
-  return AddImpl<I, D>()(add_value, dest, order);
+template<typename D, typename I>
+inline D Atomic::add(D volatile* dest, I add_value, atomic_memory_order order) {
+  return AddImpl<D, I>()(dest, add_value, order);
 }
 
-template<typename I, typename D>
+template<typename D, typename I>
 struct Atomic::AddImpl<
-  I, D,
+  D, I,
   typename EnableIf<IsIntegral<I>::value &&
                     IsIntegral<D>::value &&
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(addend, dest, order);
+    return PlatformAdd<sizeof(D)>()(dest, addend, order);
   }
 };
 
-template<typename I, typename P>
+template<typename P, typename I>
 struct Atomic::AddImpl<
-  I, P*,
+  P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
+  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
     typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type CI;
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
+    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
   }
 };
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   I addend = add_value;
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     addend *= sizeof(typename RemovePointer<D>::type);
   }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
+  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
   return old + add_value;
 }
 
 template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
                                                   atomic_memory_order order) const {
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     add_value *= sizeof(typename RemovePointer<D>::type);
   }
-  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
+  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
 }
 
-template<typename Type, typename Fn, typename I, typename D>
-inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+template<typename Type, typename Fn, typename D, typename I>
+inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
   return PrimitiveConversions::cast<D>(
     fn(PrimitiveConversions::cast<Type>(add_value),
        reinterpret_cast<Type volatile*>(dest)));
 }
 
-template<typename T, typename D, typename U>
-inline D Atomic::cmpxchg(T exchange_value,
-                         D volatile* dest,
+template<typename D, typename U, typename T>
+inline D Atomic::cmpxchg(D volatile* dest,
                          U compare_value,
+                         T exchange_value,
                          atomic_memory_order order) {
-  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
+  return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
 }
 
-template<typename T, typename D>
-inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
+template<typename D, typename T>
+inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
                                     atomic_memory_order order) {
   // Presently using a trivial implementation in terms of cmpxchg.
   // Consider adding platform support, to permit the use of compiler
   // intrinsics like gcc's __sync_bool_compare_and_swap.
   D* expected_null = NULL;
-  return expected_null == cmpxchg(value, dest, expected_null, order);
+  return expected_null == cmpxchg(dest, expected_null, value, order);
 }
 
 // Handle cmpxchg for integral and enum types.
 //
 // All the involved types must be identical.
 template<typename T>
 struct Atomic::CmpxchgImpl<
   T, T, T,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, T compare_value,
+  T operator()(T volatile* dest, T compare_value, T exchange_value,
                atomic_memory_order order) const {
     // Forward to the platform handler for the size of T.
-    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
-                                        dest,
+    return PlatformCmpxchg<sizeof(T)>()(dest,
                                         compare_value,
+                                        exchange_value,
                                         order);
   }
 };
 
 // Handle cmpxchg for pointer types.
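
The new parameter order reads naturally at lazy-initialization call sites; a hypothetical example (Foo, _cache, and ensure_initialized are illustrative names, not from this patch):

    struct Foo { /* hypothetical payload */ };
    static Foo* volatile _cache = NULL;

    static void ensure_initialized() {
      Foo* new_foo = new Foo();
      if (!Atomic::replace_if_null(&_cache, new_foo)) {
        // Another thread installed a value first; discard ours.
        delete new_foo;
      }
    }
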
@@ -788,25 +788,25 @@
 // the compare_value.
 //
 // The exchange_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // exchange_value in the destination.
-template<typename T, typename D, typename U>
+template<typename D, typename U, typename T>
 struct Atomic::CmpxchgImpl<
-  T*, D*, U*,
+  D*, U*, T*,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                     IsSame<typename RemoveCV<D>::type,
                            typename RemoveCV<U>::type>::value>::type>
 {
-  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
+  D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
                 atomic_memory_order order) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* new_value = exchange_value;
     // Don't care what the CV qualifiers for compare_value are,
     // but we need to match D* when calling platform support.
     D* old_value = const_cast<D*>(compare_value);
-    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
+    return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
   }
 };
 
 // Handle cmpxchg for types that have a translator.
 //
@@ -818,39 +818,39 @@
 template<typename T>
 struct Atomic::CmpxchgImpl<
   T, T, T,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, T compare_value,
+  T operator()(T volatile* dest, T compare_value, T exchange_value,
                atomic_memory_order order) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
     return Translator::recover(
-      cmpxchg(Translator::decay(exchange_value),
-              reinterpret_cast<Decayed volatile*>(dest),
+      cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
               Translator::decay(compare_value),
+              Translator::decay(exchange_value),
              order));
   }
 };
 
 template<typename Type, typename Fn, typename T>
 inline T Atomic::cmpxchg_using_helper(Fn fn,
-                                      T exchange_value,
                                       T volatile* dest,
-                                      T compare_value) {
+                                      T compare_value,
+                                      T exchange_value) {
   STATIC_ASSERT(sizeof(Type) == sizeof(T));
   return PrimitiveConversions::cast<T>(
     fn(PrimitiveConversions::cast<Type>(exchange_value),
        reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
 }
 
 template<typename T>
-inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
-                                                 T volatile* dest,
+inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
   STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
   uint8_t canon_exchange_value = exchange_value;
   uint8_t canon_compare_value = compare_value;
   volatile uint32_t* aligned_dest
@@ -869,11 +869,11 @@
     // value to swap in matches current value ...
     uint32_t new_value = cur;
     // ... except for the one byte we want to update
     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 
-    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
+    uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
     if (res == cur) break;      // success
 
     // at least one byte in the int changed value, so update
     // our view of the current int
     cur = res;
@@ -889,30 +889,30 @@
 template<typename T>
 struct Atomic::XchgImpl<
   T, T,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest,
+  T operator()(T volatile* dest, T exchange_value,
                atomic_memory_order order) const {
     // Forward to the platform handler for the size of T.
-    return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
+    return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
   }
 };
 
 // Handle xchg for pointer types.
 //
 // The exchange_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // exchange_value in the destination.
-template<typename T, typename D>
+template<typename D, typename T>
 struct Atomic::XchgImpl<
-  T*, D*,
+  D*, T*,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  D* operator()(T* exchange_value, D* volatile* dest,
+  D* operator()(D* volatile* dest, T* exchange_value,
                 atomic_memory_order order) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* new_value = exchange_value;
-    return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
+    return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
   }
 };
 
 // Handle xchg for types that have a translator.
 //
@@ -924,32 +924,33 @@
 template<typename T>
 struct Atomic::XchgImpl<
   T, T,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest,
+  T operator()(T volatile* dest, T exchange_value,
                atomic_memory_order order) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
     return Translator::recover(
-      xchg(Translator::decay(exchange_value),
-           reinterpret_cast<Decayed volatile*>(dest),
+      xchg(reinterpret_cast<Decayed volatile*>(dest),
+           Translator::decay(exchange_value),
            order));
   }
 };
 
 template<typename Type, typename Fn, typename T>
 inline T Atomic::xchg_using_helper(Fn fn,
-                                   T exchange_value,
-                                   T volatile* dest) {
+                                   T volatile* dest,
+                                   T exchange_value) {
   STATIC_ASSERT(sizeof(Type) == sizeof(T));
+  // Notice the swapped order of arguments. Change when/if stubs are rewritten.
   return PrimitiveConversions::cast<T>(
     fn(PrimitiveConversions::cast<Type>(exchange_value),
        reinterpret_cast<Type volatile*>(dest)));
 }
 
-template<typename T, typename D>
-inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
-  return XchgImpl<T, D>()(exchange_value, dest, order);
+template<typename D, typename T>
+inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
+  return XchgImpl<D, T>()(dest, exchange_value, order);
 }
 
 #endif // SHARE_RUNTIME_ATOMIC_HPP
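
With the reordering complete, every public Atomic operation now takes dest first. A hypothetical before/after call site (the `_flag` field is illustrative):

    volatile intptr_t _flag = 0;  // hypothetical field

    intptr_t example_xchg() {
      // Before this change: Atomic::xchg((intptr_t)1, &_flag);
      // After it, the destination comes first:
      return Atomic::xchg(&_flag, (intptr_t)1);  // returns the prior value
    }
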