
src/hotspot/share/runtime/atomic.hpp


--- old/src/hotspot/share/runtime/atomic.hpp

 124   // scaled to the size of the type pointed to by the pointer.
 125   template<typename D>
 126   inline static void dec(D volatile* dest,
 127                          atomic_memory_order order = memory_order_conservative);
 128 
 129   // Performs atomic exchange of *dest with exchange_value. Returns the
 130   // prior value of *dest. xchg*() provide:
 131   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
 132   // The type T must be either a pointer type convertible to or equal
 133   // to D, an integral/enum type equal to D, or a type equal to D that
 134   // is primitive convertible using PrimitiveConversions.
 135   template<typename D, typename T>
 136   inline static D xchg(volatile D* dest, T exchange_value,
 137                        atomic_memory_order order = memory_order_conservative);
 138 
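For orientation, a minimal usage sketch (the variable names are hypothetical, not from this file):

    volatile int _state = 0;
    // Atomically store 1 into _state; returns what was there before.
    int old_state = Atomic::xchg(&_state, 1);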
 139   // Performs atomic compare of *dest and compare_value, and exchanges
 140   // *dest with exchange_value if the comparison succeeded. Returns prior
 141   // value of *dest. cmpxchg*() provide:
 142   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
 143 
 144   template<typename T, typename D, typename U>
 145   inline static D cmpxchg(T exchange_value,
 146                           D volatile* dest,
 147                           U compare_value,
 148                           atomic_memory_order order = memory_order_conservative);
 149 
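A minimal sketch against this (old) cmpxchg parameter order, with hypothetical names; the exchange value comes first, the compare value last:

    volatile int _claimed = 0;
    // Set _claimed to 1 only if it is still 0; returns the prior value.
    int prior = Atomic::cmpxchg(1, &_claimed, 0);
    bool won = (prior == 0);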
 150   // Performs atomic compare of *dest and NULL, and replaces *dest
 151   // with exchange_value if the comparison succeeded.  Returns true if
 152   // the comparison succeeded and the exchange occurred.  This is
 153   // often used as part of lazy initialization, as a lock-free
 154   // alternative to the Double-Checked Locking Pattern.
 155   template<typename T, typename D>
 156   inline static bool replace_if_null(T* value, D* volatile* dest,
 157                                      atomic_memory_order order = memory_order_conservative);
 158 
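A sketch of the lazy-initialization idiom this supports, with a hypothetical type Foo and this (old) argument order, value before dest:

    static Foo* volatile _instance = NULL;
    Foo* candidate = new Foo();
    if (!Atomic::replace_if_null(candidate, &_instance)) {
      delete candidate;  // another thread installed its copy first
    }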
 159 private:
 160 WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
 161   // Test whether From is implicitly convertible to To.
 162   // From and To must be pointer types.
 163   // Note: Provides the limited subset of C++11 std::is_convertible
 164   // that is needed here.
 165   template<typename From, typename To> struct IsPointerConvertible;
 166 
 167 protected:
 168   // Dispatch handler for store.  Provides type-based validity
 169   // checking and limited conversions around calls to the platform-
 170   // specific implementation layer provided by PlatformOp.
 171   template<typename D, typename T, typename PlatformOp, typename Enable = void>
 172   struct StoreImpl;
 173 
 174   // Platform-specific implementation of store.  Support for sizes
 175   // of 1, 2, 4, and (if different) pointer size bytes is required.
 176   // The class is a function object that must be default constructible,


 276   template<typename Derived> struct FetchAndAdd;
 277   template<typename Derived> struct AddAndFetch;
 278 private:
 279 
 280   // Support for platforms that implement some variants of add using a
 281   // (typically out of line) non-template helper function.  The
 282   // generic arguments passed to PlatformAdd need to be translated to
 283   // the appropriate type for the helper function, the helper function
 284   // invoked on the translated arguments, and the result translated
 285   // back.  Type is the parameter / return type of the helper
 286   // function.  No scaling of add_value is performed when D is a pointer
 287   // type, so this function can be used to implement the support function
 288   // required by AddAndFetch.
 289   template<typename Type, typename Fn, typename D, typename I>
 290   static D add_using_helper(Fn fn, D volatile* dest, I add_value);
 291 
 292   // Dispatch handler for cmpxchg.  Provides type-based validity
 293   // checking and limited conversions around calls to the
 294   // platform-specific implementation layer provided by
 295   // PlatformCmpxchg.
 296   template<typename T, typename D, typename U, typename Enable = void>
 297   struct CmpxchgImpl;
 298 
 299   // Platform-specific implementation of cmpxchg.  Support for sizes
 300   // of 1, 4, and 8 is required.  The class is a function object that
 301   // must be default constructible, with these requirements:
 302   //
 303   // - dest is of type T*.
 304   // - exchange_value and compare_value are of type T.
 305   // - order is of type atomic_memory_order.
 306   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
 307   //
 308   // Then
 309   //   platform_cmpxchg(exchange_value, dest, compare_value, order)
 310   // must be a valid expression, returning a result convertible to T.
 311   //
 312   // A default definition is provided, which declares a function template
 313   //   T operator()(T, T volatile*, T, atomic_memory_order) const
 314   //
 315   // For each required size, a platform must either provide an
 316   // appropriate definition of that function, or must entirely
 317   // specialize the class template for that size.
 318   template<size_t byte_size> struct PlatformCmpxchg;
 319 
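As an illustration only (not taken from any actual platform file), a 4-byte definition in terms of GCC's __sync builtin could look roughly like:

    template<>
    template<typename T>
    inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                    T volatile* dest,
                                                    T compare_value,
                                                    atomic_memory_order order) const {
      STATIC_ASSERT(4 == sizeof(T));
      // The builtin is a full two-way barrier, so weaker requested
      // orders are simply strengthened here.
      return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
    }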
 320   // Support for platforms that implement some variants of cmpxchg
 321   // using a (typically out of line) non-template helper function.
 322   // The generic arguments passed to PlatformCmpxchg need to be
 323   // translated to the appropriate type for the helper function, the
 324   // helper invoked on the translated arguments, and the result
 325   // translated back.  Type is the parameter / return type of the
 326   // helper function.
 327   template<typename Type, typename Fn, typename T>
 328   static T cmpxchg_using_helper(Fn fn,
 329                                 T exchange_value,
 330                                 T volatile* dest,
 331                                 T compare_value);
 332 
 333   // Support platforms that do not provide Read-Modify-Write
 334   // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
 335   // this class.
 336 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 337   struct CmpxchgByteUsingInt;
 338 private:
 339 
 340   // Dispatch handler for xchg.  Provides type-based validity
 341   // checking and limited conversions around calls to the
 342   // platform-specific implementation layer provided by
 343   // PlatformXchg.
 344   template<typename D, typename T, typename Enable = void>
 345   struct XchgImpl;
 346 
 347   // Platform-specific implementation of xchg.  Support for sizes
 348   // of 4 and sizeof(intptr_t) is required.  The class is a function
 349   // object that must be default constructible, with these requirements:
 350   //
 351   // - dest is of type T*.


 551   // matching signedness of I.  Otherwise, use D as the addend type.
 552   typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
 553   typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
 554   // Only allow conversions that can't change the value.
 555   STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
 556   STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
 557   AddendType addend = sub_value;
 558   // Assumes two's complement integer representation.
 559   #pragma warning(suppress: 4146) // In case AddendType is not signed.
 560   return Atomic::add(dest, -addend, order);
 561 }
 562 
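To spell out the negation trick: when AddendType is unsigned, -addend is computed modulo 2^N (e.g. for addend == 5 it yields 2^N - 5), and adding that value wraps around to exactly a subtraction of 5. The pragma suppresses MSVC warning C4146 (unary minus applied to an unsigned type), which is intended here.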
 563 // Define the class before including platform file, which may specialize
 564 // the operator definition.  No generic definition of specializations
 565   // of the operator template is provided, nor are there any generic
 566 // specializations of the class.  The platform file is responsible for
 567 // providing those.
 568 template<size_t byte_size>
 569 struct Atomic::PlatformCmpxchg {
 570   template<typename T>
 571   T operator()(T exchange_value,
 572                T volatile* dest,
 573                T compare_value,
 574                atomic_memory_order order) const;
 575 };
 576 
 577 // Define the class before including platform file, which may use this
 578 // as a base class, requiring it be complete.  The definition is later
 579 // in this file, near the other definitions related to cmpxchg.
 580 struct Atomic::CmpxchgByteUsingInt {
 581   template<typename T>
 582   T operator()(T exchange_value,
 583                T volatile* dest,
 584                T compare_value,
 585                atomic_memory_order order) const;
 586 };
 587 
 588 // Define the class before including platform file, which may specialize
 589 // the operator definition.  No generic definition of specializations
 590   // of the operator template is provided, nor are there any generic
 591 // specializations of the class.  The platform file is responsible for
 592 // providing those.
 593 template<size_t byte_size>
 594 struct Atomic::PlatformXchg {
 595   template<typename T>
 596   T operator()(T volatile* dest,
 597                T exchange_value,
 598                atomic_memory_order order) const;
 599 };
 600 
 601 template <ScopedFenceType T>
 602 class ScopedFenceGeneral: public StackObj {
 603  public:
 604   void prefix() {}


 728 }
 729 
 730 template<typename Derived>
 731 template<typename D, typename I>
 732 inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
 733                                                   atomic_memory_order order) const {
 734   // If D is a pointer type P*, scale by sizeof(P).
 735   if (IsPointer<D>::value) {
 736     add_value *= sizeof(typename RemovePointer<D>::type);
 737   }
 738   return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
 739 }
 740 
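A sketch of what this scaling gives callers (hypothetical variables): adding 1 to an atomically updated T* advances it by sizeof(T) bytes, mirroring ordinary pointer arithmetic:

    static int _buffer[16];
    static int* volatile _cursor = _buffer;
    // Atomically advance _cursor by one element (sizeof(int) bytes);
    // returns the updated pointer.
    int* next = Atomic::add(&_cursor, 1);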
 741 template<typename Type, typename Fn, typename D, typename I>
 742 inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
 743   return PrimitiveConversions::cast<D>(
 744     fn(PrimitiveConversions::cast<Type>(add_value),
 745        reinterpret_cast<Type volatile*>(dest)));
 746 }
 747 
 748 template<typename T, typename D, typename U>
 749 inline D Atomic::cmpxchg(T exchange_value,
 750                          D volatile* dest,
 751                          U compare_value,
 752                          atomic_memory_order order) {
 753   return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
 754 }
 755 
 756 template<typename T, typename D>
 757 inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
 758                                     atomic_memory_order order) {
 759   // Presently using a trivial implementation in terms of cmpxchg.
 760   // Consider adding platform support, to permit the use of compiler
 761   // intrinsics like gcc's __sync_bool_compare_and_swap.
 762   D* expected_null = NULL;
 763   return expected_null == cmpxchg(value, dest, expected_null, order);
 764 }
 765 
 766 // Handle cmpxchg for integral and enum types.
 767 //
 768 // All the involved types must be identical.
 769 template<typename T>
 770 struct Atomic::CmpxchgImpl<
 771   T, T, T,
 772   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 773 {
 774   T operator()(T exchange_value, T volatile* dest, T compare_value,
 775                atomic_memory_order order) const {
 776     // Forward to the platform handler for the size of T.
 777     return PlatformCmpxchg<sizeof(T)>()(exchange_value,
 778                                         dest,
 779                                         compare_value,
 780                                         order);
 781   }
 782 };
 783 
 784 // Handle cmpxchg for pointer types.
 785 //
 786 // The destination's type and the compare_value type must be the same,
 787 // ignoring cv-qualifiers; we don't care about the cv-qualifiers of
 788 // the compare_value.
 789 //
 790 // The exchange_value must be implicitly convertible to the
 791 // destination's type; it must be type-correct to store the
 792 // exchange_value in the destination.
 793 template<typename T, typename D, typename U>
 794 struct Atomic::CmpxchgImpl<
 795   T*, D*, U*,
 796   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
 797                     IsSame<typename RemoveCV<D>::type,
 798                            typename RemoveCV<U>::type>::value>::type>
 799 {
 800   D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
 801                atomic_memory_order order) const {
 802     // Allow derived to base conversion, and adding cv-qualifiers.
 803     D* new_value = exchange_value;
 804     // Don't care what the CV qualifiers for compare_value are,
 805     // but we need to match D* when calling platform support.
 806     D* old_value = const_cast<D*>(compare_value);
 807     return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
 808   }
 809 };
 810 
 811 // Handle cmpxchg for types that have a translator.
 812 //
 813 // All the involved types must be identical.
 814 //
 815 // This translates the original call into a call on the decayed
 816 // arguments, and returns the recovered result of that translated
 817 // call.
 818 template<typename T>
 819 struct Atomic::CmpxchgImpl<
 820   T, T, T,
 821   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 822 {
 823   T operator()(T exchange_value, T volatile* dest, T compare_value,
 824                atomic_memory_order order) const {
 825     typedef PrimitiveConversions::Translate<T> Translator;
 826     typedef typename Translator::Decayed Decayed;
 827     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 828     return Translator::recover(
 829       cmpxchg(Translator::decay(exchange_value),
 830               reinterpret_cast<Decayed volatile*>(dest),
 831               Translator::decay(compare_value),
 832               order));
 833   }
 834 };
 835 
 836 template<typename Type, typename Fn, typename T>
 837 inline T Atomic::cmpxchg_using_helper(Fn fn,
 838                                       T exchange_value,
 839                                       T volatile* dest,
 840                                       T compare_value) {
 841   STATIC_ASSERT(sizeof(Type) == sizeof(T));
 842   return PrimitiveConversions::cast<T>(
 843     fn(PrimitiveConversions::cast<Type>(exchange_value),
 844        reinterpret_cast<Type volatile*>(dest),
 845        PrimitiveConversions::cast<Type>(compare_value)));
 846 }
 847 
 848 template<typename T>
 849 inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
 850                                                  T volatile* dest,
 851                                                  T compare_value,
 852                                                  atomic_memory_order order) const {
 853   STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
 854   uint8_t canon_exchange_value = exchange_value;
 855   uint8_t canon_compare_value = compare_value;
 856   volatile uint32_t* aligned_dest
 857     = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
 858   size_t offset = pointer_delta(dest, aligned_dest, 1);
 859   uint32_t cur = *aligned_dest;
 860   uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);
 861 
 862   // current value may not be what we are looking for, so force it
 863   // to that value so the initial cmpxchg will fail if it is different
 864   cur_as_bytes[offset] = canon_compare_value;
 865 
 866   // always execute a real cmpxchg so that we get the required memory
 867   // barriers even on initial failure
 868   do {
 869     // value to swap in matches current value ...
 870     uint32_t new_value = cur;
 871     // ... except for the one byte we want to update
 872     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 873 
 874     uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
 875     if (res == cur) break;      // success
 876 
 877     // at least one byte in the int changed value, so update
 878     // our view of the current int
 879     cur = res;
 880     // if our byte still holds the compare value we loop and try again
 881   } while (cur_as_bytes[offset] == canon_compare_value);
 882 
 883   return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
 884 }
 885 
 886 // Handle xchg for integral and enum types.
 887 //
 888 // All the involved types must be identical.
 889 template<typename T>
 890 struct Atomic::XchgImpl<
 891   T, T,
 892   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 893 {
 894   T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {


+++ new/src/hotspot/share/runtime/atomic.hpp

 124   // scaled to the size of the type pointed to by the pointer.
 125   template<typename D>
 126   inline static void dec(D volatile* dest,
 127                          atomic_memory_order order = memory_order_conservative);
 128 
 129   // Performs atomic exchange of *dest with exchange_value. Returns the
 130   // prior value of *dest. xchg*() provide:
 131   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
 132   // The type T must be either a pointer type convertible to or equal
 133   // to D, an integral/enum type equal to D, or a type equal to D that
 134   // is primitive convertible using PrimitiveConversions.
 135   template<typename D, typename T>
 136   inline static D xchg(volatile D* dest, T exchange_value,
 137                        atomic_memory_order order = memory_order_conservative);
 138 
 139   // Performs atomic compare of *dest and compare_value, and exchanges
 140   // *dest with exchange_value if the comparison succeeded. Returns prior
 141   // value of *dest. cmpxchg*() provide:
 142   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
 143 
 144   template<typename D, typename U, typename T>
 145   inline static D cmpxchg(D volatile* dest,
 146                           U compare_value,
 147                           T exchange_value,
 148                           atomic_memory_order order = memory_order_conservative);
 149 
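The same usage sketch as given for the old declaration, rewritten for this new parameter order: dest first, then compare_value, then exchange_value (names hypothetical):

    volatile int _claimed = 0;
    // Set _claimed to 1 only if it is still 0; returns the prior value.
    int prior = Atomic::cmpxchg(&_claimed, 0, 1);
    bool won = (prior == 0);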
 150   // Performs atomic compare of *dest and NULL, and replaces *dest
 151   // with exchange_value if the comparison succeeded.  Returns true if
 152   // the comparison succeeded and the exchange occurred.  This is
 153   // often used as part of lazy initialization, as a lock-free
 154   // alternative to the Double-Checked Locking Pattern.
 155   template<typename D, typename T>
 156   inline static bool replace_if_null(D* volatile* dest, T* value,
 157                                      atomic_memory_order order = memory_order_conservative);
 158 
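The lazy-initialization sketch under the new argument order, dest before value (Foo is hypothetical):

    static Foo* volatile _instance = NULL;
    Foo* candidate = new Foo();
    if (!Atomic::replace_if_null(&_instance, candidate)) {
      delete candidate;  // another thread installed its copy first
    }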
 159 private:
 160 WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
 161   // Test whether From is implicitly convertible to To.
 162   // From and To must be pointer types.
 163   // Note: Provides the limited subset of C++11 std::is_convertible
 164   // that is needed here.
 165   template<typename From, typename To> struct IsPointerConvertible;
 166 
 167 protected:
 168   // Dispatch handler for store.  Provides type-based validity
 169   // checking and limited conversions around calls to the platform-
 170   // specific implementation layer provided by PlatformOp.
 171   template<typename D, typename T, typename PlatformOp, typename Enable = void>
 172   struct StoreImpl;
 173 
 174   // Platform-specific implementation of store.  Support for sizes
 175   // of 1, 2, 4, and (if different) pointer size bytes is required.
 176   // The class is a function object that must be default constructible,


 276   template<typename Derived> struct FetchAndAdd;
 277   template<typename Derived> struct AddAndFetch;
 278 private:
 279 
 280   // Support for platforms that implement some variants of add using a
 281   // (typically out of line) non-template helper function.  The
 282   // generic arguments passed to PlatformAdd need to be translated to
 283   // the appropriate type for the helper function, the helper function
 284   // invoked on the translated arguments, and the result translated
 285   // back.  Type is the parameter / return type of the helper
 286   // function.  No scaling of add_value is performed when D is a pointer
 287   // type, so this function can be used to implement the support function
 288   // required by AddAndFetch.
 289   template<typename Type, typename Fn, typename D, typename I>
 290   static D add_using_helper(Fn fn, D volatile* dest, I add_value);
 291 
 292   // Dispatch handler for cmpxchg.  Provides type-based validity
 293   // checking and limited conversions around calls to the
 294   // platform-specific implementation layer provided by
 295   // PlatformCmpxchg.
 296   template<typename D, typename U, typename T, typename Enable = void>
 297   struct CmpxchgImpl;
 298 
 299   // Platform-specific implementation of cmpxchg.  Support for sizes
 300   // of 1, 4, and 8 is required.  The class is a function object that
 301   // must be default constructible, with these requirements:
 302   //
 303   // - dest is of type T*.
 304   // - exchange_value and compare_value are of type T.
 305   // - order is of type atomic_memory_order.
 306   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
 307   //
 308   // Then
 309   //   platform_cmpxchg(dest, compare_value, exchange_value, order)
 310   // must be a valid expression, returning a result convertible to T.
 311   //
 312   // A default definition is provided, which declares a function template
 313   //   T operator()(T volatile*, T, T, atomic_memory_order) const
 314   //
 315   // For each required size, a platform must either provide an
 316   // appropriate definition of that function, or must entirely
 317   // specialize the class template for that size.
 318   template<size_t byte_size> struct PlatformCmpxchg;
 319 
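As with the old version, an illustrative (not platform-file) 4-byte definition in terms of GCC's __sync builtin, now with the reordered parameters:

    template<>
    template<typename T>
    inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                    T compare_value,
                                                    T exchange_value,
                                                    atomic_memory_order order) const {
      STATIC_ASSERT(4 == sizeof(T));
      return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
    }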
 320   // Support for platforms that implement some variants of cmpxchg
 321   // using a (typically out of line) non-template helper function.
 322   // The generic arguments passed to PlatformCmpxchg need to be
 323   // translated to the appropriate type for the helper function, the
 324   // helper invoked on the translated arguments, and the result
 325   // translated back.  Type is the parameter / return type of the
 326   // helper function.
 327   template<typename Type, typename Fn, typename T>
 328   static T cmpxchg_using_helper(Fn fn,
 329                                 T volatile* dest,
 330                                 T compare_value,
 331                                 T exchange_value);
 332 
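A sketch of the intended use, with a hypothetical out-of-line helper; note (see the definition later in this file) that the helper itself keeps the old (exchange, dest, compare) signature, only the template's own parameters are reordered:

    // Hypothetical out-of-line helper, e.g. provided by the OS layer.
    int32_t my_cmpxchg_func(int32_t exchange_value,
                            int32_t volatile* dest,
                            int32_t compare_value);

    template<>
    template<typename T>
    inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                    T compare_value,
                                                    T exchange_value,
                                                    atomic_memory_order order) const {
      return cmpxchg_using_helper<int32_t>(my_cmpxchg_func,
                                           dest, compare_value, exchange_value);
    }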
 333   // Support platforms that do not provide Read-Modify-Write
 334   // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
 335   // this class.
 336 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 337   struct CmpxchgByteUsingInt;
 338 private:
 339 
 340   // Dispatch handler for xchg.  Provides type-based validity
 341   // checking and limited conversions around calls to the
 342   // platform-specific implementation layer provided by
 343   // PlatformXchg.
 344   template<typename D, typename T, typename Enable = void>
 345   struct XchgImpl;
 346 
 347   // Platform-specific implementation of xchg.  Support for sizes
 348   // of 4 and sizeof(intptr_t) is required.  The class is a function
 349   // object that must be default constructible, with these requirements:
 350   //
 351   // - dest is of type T*.


 551   // matching signedness of I.  Otherwise, use D as the addend type.
 552   typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
 553   typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
 554   // Only allow conversions that can't change the value.
 555   STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
 556   STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
 557   AddendType addend = sub_value;
 558   // Assumes two's complement integer representation.
 559   #pragma warning(suppress: 4146) // In case AddendType is not signed.
 560   return Atomic::add(dest, -addend, order);
 561 }
 562 
 563 // Define the class before including platform file, which may specialize
 564 // the operator definition.  No generic definition of specializations
 565   // of the operator template is provided, nor are there any generic
 566 // specializations of the class.  The platform file is responsible for
 567 // providing those.
 568 template<size_t byte_size>
 569 struct Atomic::PlatformCmpxchg {
 570   template<typename T>
 571   T operator()(T volatile* dest,
 572                T compare_value,
 573                T exchange_value,
 574                atomic_memory_order order) const;
 575 };
 576 
 577 // Define the class before including platform file, which may use this
 578 // as a base class, requiring it be complete.  The definition is later
 579 // in this file, near the other definitions related to cmpxchg.
 580 struct Atomic::CmpxchgByteUsingInt {
 581   template<typename T>
 582   T operator()(T volatile* dest,
 583                T compare_value,
 584                T exchange_value,
 585                atomic_memory_order order) const;
 586 };
 587 
 588 // Define the class before including platform file, which may specialize
 589 // the operator definition.  No generic definition of specializations
 590   // of the operator template is provided, nor are there any generic
 591 // specializations of the class.  The platform file is responsible for
 592 // providing those.
 593 template<size_t byte_size>
 594 struct Atomic::PlatformXchg {
 595   template<typename T>
 596   T operator()(T volatile* dest,
 597                T exchange_value,
 598                atomic_memory_order order) const;
 599 };
 600 
 601 template <ScopedFenceType T>
 602 class ScopedFenceGeneral: public StackObj {
 603  public:
 604   void prefix() {}


 728 }
 729 
 730 template<typename Derived>
 731 template<typename D, typename I>
 732 inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
 733                                                   atomic_memory_order order) const {
 734   // If D is a pointer type P*, scale by sizeof(P).
 735   if (IsPointer<D>::value) {
 736     add_value *= sizeof(typename RemovePointer<D>::type);
 737   }
 738   return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
 739 }
 740 
 741 template<typename Type, typename Fn, typename D, typename I>
 742 inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
 743   return PrimitiveConversions::cast<D>(
 744     fn(PrimitiveConversions::cast<Type>(add_value),
 745        reinterpret_cast<Type volatile*>(dest)));
 746 }
 747 
 748 template<typename D, typename U, typename T>
 749 inline D Atomic::cmpxchg(D volatile* dest,
 750                          U compare_value,
 751                          T exchange_value,
 752                          atomic_memory_order order) {
 753   return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
 754 }
 755 
 756 template<typename D, typename T>
 757 inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
 758                                     atomic_memory_order order) {
 759   // Presently using a trivial implementation in terms of cmpxchg.
 760   // Consider adding platform support, to permit the use of compiler
 761   // intrinsics like gcc's __sync_bool_compare_and_swap.
 762   D* expected_null = NULL;
 763   return expected_null == cmpxchg(dest, expected_null, value, order);
 764 }
 765 
 766 // Handle cmpxchg for integral and enum types.
 767 //
 768 // All the involved types must be identical.
 769 template<typename T>
 770 struct Atomic::CmpxchgImpl<
 771   T, T, T,
 772   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 773 {
 774   T operator()(T volatile* dest, T compare_value, T exchange_value,
 775                atomic_memory_order order) const {
 776     // Forward to the platform handler for the size of T.
 777     return PlatformCmpxchg<sizeof(T)>()(dest,
 778                                         compare_value,
 779                                         exchange_value,
 780                                         order);
 781   }
 782 };
 783 
 784 // Handle cmpxchg for pointer types.
 785 //
 786 // The destination's type and the compare_value type must be the same,
 787 // ignoring cv-qualifiers; we don't care about the cv-qualifiers of
 788 // the compare_value.
 789 //
 790 // The exchange_value must be implicitly convertible to the
 791 // destination's type; it must be type-correct to store the
 792 // exchange_value in the destination.
 793 template<typename D, typename U, typename T>
 794 struct Atomic::CmpxchgImpl<
 795   D*, U*, T*,
 796   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
 797                     IsSame<typename RemoveCV<D>::type,
 798                            typename RemoveCV<U>::type>::value>::type>
 799 {
 800   D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
 801                atomic_memory_order order) const {
 802     // Allow derived to base conversion, and adding cv-qualifiers.
 803     D* new_value = exchange_value;
 804     // Don't care what the CV qualifiers for compare_value are,
 805     // but we need to match D* when calling platform support.
 806     D* old_value = const_cast<D*>(compare_value);
 807     return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
 808   }
 809 };
 810 
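A sketch of what this specialization admits (Base and Derived are hypothetical): the exchange value may point to a derived type, and the compare value's cv-qualifiers are ignored:

    class Base {};
    class Derived : public Base {};

    static Base* volatile _head = NULL;
    Derived* d = new Derived();
    // T* = Derived*, D* = Base*, U* = Base*; returns the prior Base*.
    Base* prior = Atomic::cmpxchg(&_head, (Base*)NULL, d);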
 811 // Handle cmpxchg for types that have a translator.
 812 //
 813 // All the involved types must be identical.
 814 //
 815 // This translates the original call into a call on the decayed
 816 // arguments, and returns the recovered result of that translated
 817 // call.
 818 template<typename T>
 819 struct Atomic::CmpxchgImpl<
 820   T, T, T,
 821   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 822 {
 823   T operator()(T volatile* dest, T compare_value, T exchange_value,
 824                atomic_memory_order order) const {
 825     typedef PrimitiveConversions::Translate<T> Translator;
 826     typedef typename Translator::Decayed Decayed;
 827     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 828     return Translator::recover(
 829       cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
 830               Translator::decay(compare_value),
 831               Translator::decay(exchange_value),
 832               order));
 833   }
 834 };
 835 
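For a concrete picture of the translator protocol, a hypothetical registered wrapper type might look like this (Value, Decayed, decay and recover are the members the Translate machinery expects; TaggedWord is not a real HotSpot type):

    class TaggedWord {
      uintptr_t _bits;
     public:
      explicit TaggedWord(uintptr_t bits) : _bits(bits) {}
      uintptr_t bits() const { return _bits; }
    };

    template<>
    struct PrimitiveConversions::Translate<TaggedWord> : public TrueType {
      typedef TaggedWord Value;
      typedef uintptr_t Decayed;
      static Decayed decay(Value x) { return x.bits(); }
      static Value recover(Decayed x) { return TaggedWord(x); }
    };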
 836 template<typename Type, typename Fn, typename T>
 837 inline T Atomic::cmpxchg_using_helper(Fn fn,
 838                                       T volatile* dest,
 839                                       T compare_value,
 840                                       T exchange_value) {
 841   STATIC_ASSERT(sizeof(Type) == sizeof(T));
 842   return PrimitiveConversions::cast<T>(
 843     fn(PrimitiveConversions::cast<Type>(exchange_value),
 844        reinterpret_cast<Type volatile*>(dest),
 845        PrimitiveConversions::cast<Type>(compare_value)));
 846 }
 847 
 848 template<typename T>
 849 inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
 850                                                  T compare_value,
 851                                                  T exchange_value,
 852                                                  atomic_memory_order order) const {
 853   STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
 854   uint8_t canon_exchange_value = exchange_value;
 855   uint8_t canon_compare_value = compare_value;
 856   volatile uint32_t* aligned_dest
 857     = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
 858   size_t offset = pointer_delta(dest, aligned_dest, 1);
 859   uint32_t cur = *aligned_dest;
 860   uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);
 861 
 862   // current value may not be what we are looking for, so force it
 863   // to that value so the initial cmpxchg will fail if it is different
 864   cur_as_bytes[offset] = canon_compare_value;
 865 
 866   // always execute a real cmpxchg so that we get the required memory
 867   // barriers even on initial failure
 868   do {
 869     // value to swap in matches current value ...
 870     uint32_t new_value = cur;
 871     // ... except for the one byte we want to update
 872     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 873 
 874     uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
 875     if (res == cur) break;      // success
 876 
 877     // at least one byte in the int changed value, so update
 878     // our view of the current int
 879     cur = res;
 880     // if our byte still holds the compare value we loop and try again
 881   } while (cur_as_bytes[offset] == canon_compare_value);
 882 
 883   return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
 884 }
 885 
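Wiring this up on a platform that lacks a byte-sized CAS is then a one-liner in the os_cpu file (a sketch of the intended pattern):

    template<>
    struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};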
 886 // Handle xchg for integral and enum types.
 887 //
 888 // All the involved types must be identical.
 889 template<typename T>
 890 struct Atomic::XchgImpl<
 891   T, T,
 892   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 893 {
 894   T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {

