
src/hotspot/share/runtime/atomic.hpp





  83   inline static void store(volatile D* dest, T store_value);
  84 
  85   template <typename D, typename T>
  86   inline static void release_store(volatile D* dest, T store_value);
  87 
  88   template <typename D, typename T>
  89   inline static void release_store_fence(volatile D* dest, T store_value);
  90 
  91   // Atomically load from a location
  92   // The type T must be either a pointer type, an integral/enum type,
  93   // or a type that is primitive convertible using PrimitiveConversions.
  94   template<typename T>
  95   inline static T load(const volatile T* dest);
  96 
  97   template <typename T>
  98   inline static T load_acquire(const volatile T* dest);
  99 
 100   // Atomically add to a location. Returns updated value. add*() provide:
 101   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 102 
 103   template<typename I, typename D>
 104   inline static D add(I add_value, D volatile* dest,
 105                       atomic_memory_order order = memory_order_conservative);
 106 
 107   template<typename I, typename D>
 108   inline static D sub(I sub_value, D volatile* dest,
 109                       atomic_memory_order order = memory_order_conservative);
 110 
 111   // Atomically increment a location. inc() provides:
 112   // <fence> increment-dest <membar StoreLoad|StoreStore>
 113   // The type D may be either a pointer type, or an integral
 114   // type. If it is a pointer type, then the increment is
 115   // scaled to the size of the type pointed to by the pointer.
 116   template<typename D>
 117   inline static void inc(D volatile* dest,
 118                          atomic_memory_order order = memory_order_conservative);
 119 
 120   // Atomically decrement a location. dec() provides:
 121   // <fence> decrement-dest <membar StoreLoad|StoreStore>
 122   // The type D may be either a pointer type, or an integral
 123   // type. If it is a pointer type, then the decrement is
 124   // scaled to the size of the type pointed to by the pointer.
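
As a hedged usage sketch of the declarations above (the fields and function are hypothetical, not part of this file), a caller in this version passes the operand first and the destination second; inc() and dec() on a pointer location step by whole elements:

// Illustrative only; assumes the declarations above.
static volatile int  _pending;   // hypothetical integral counter
static int* volatile _cursor;    // hypothetical pointer slot

void example_value_first() {
  Atomic::inc(&_pending);                          // default memory_order_conservative
  int updated = Atomic::add(4, &_pending);         // operand first, destination second; returns new value
  Atomic::inc(&_cursor);                           // pointer destination: advances by sizeof(int)
  int observed = Atomic::load_acquire(&_pending);  // acquiring load
  Atomic::release_store(&_pending, 0);             // releasing store
  (void)updated; (void)observed;
}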


 207   // - dest is of type T*, an integral, enum or pointer type, or
 208   //   T is convertible to a primitive type using PrimitiveConversions
 209   // - platform_load is an object of type PlatformLoad<sizeof(T)>.
 210   //
 211   // Then
 212   //   platform_load(src)
 213   // must be a valid expression, returning a result convertible to T.
 214   //
 215   // The default implementation is a volatile load. If a platform
 216   // requires more, e.g. for 64-bit loads, a specialization is required.
 217   template<size_t byte_size> struct PlatformLoad;
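
A minimal sketch, consistent with the comment above, of what the unspecialized PlatformLoad amounts to (the actual default is defined further down in the file, outside the regions shown here); wider loads must come from a platform specialization:

template<size_t byte_size>
struct Atomic::PlatformLoad {
  template<typename T>
  T operator()(T const volatile* src) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*));  // wide atomics need specialization
    return *src;                                // plain volatile load
  }
};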
 218 
 219   // Give platforms a variation point to specialize.
 220   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
 221   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
 222 
 223 private:
 224   // Dispatch handler for add.  Provides type-based validity checking
 225   // and limited conversions around calls to the platform-specific
 226   // implementation layer provided by PlatformAdd.
 227   template<typename I, typename D, typename Enable = void>
 228   struct AddImpl;
 229 
 230   // Platform-specific implementation of add.  Support for sizes of 4
 231   // bytes and (if different) pointer size bytes are required.  The
 232   // class is a function object that must be default constructable,
 233   // with these requirements:
 234   //
 235   // - dest is of type D*, an integral or pointer type.
 236   // - add_value is of type I, an integral type.
 237   // - sizeof(I) == sizeof(D).
 238   // - if D is an integral type, I == D.
 239   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
 240   //
 241   // Then
 242   //   platform_add(add_value, dest)
 243   // must be a valid expression, returning a result convertible to D.
 244   //
 245   // No definition is provided; all platforms must explicitly define
 246   // this class and any needed specializations.
 247   template<size_t byte_size> struct PlatformAdd;
 248 
 249   // Helper base classes for defining PlatformAdd.  To use, define
 250   // PlatformAdd or a specialization that derives from one of these,
 251   // and include in the PlatformAdd definition the support function
 252   // (described below) required by the base class.
 253   //
 254   // These classes implement the required function object protocol for
 255   // PlatformAdd, using a support function template provided by the
 256   // derived class.  Let add_value (of type I) and dest (of type D) be
 257   // the arguments the object is called with.  If D is a pointer type
 258   // P*, then let addend (of type I) be add_value * sizeof(P);
 259   // otherwise, addend is add_value.
 260   //
 261   // FetchAndAdd requires the derived class to provide
 262   //   fetch_and_add(addend, dest)
 263   // atomically adding addend to the value of dest, and returning the
 264   // old value.
 265   //
 266   // AddAndFetch requires the derived class to provide
 267   //   add_and_fetch(addend, dest)
 268   // atomically adding addend to the value of dest, and returning the
 269   // new value.
 270   //
 271   // When D is a pointer type P*, both fetch_and_add and add_and_fetch
 272   // treat it as if it were a uintptr_t; they do not perform any
 273   // scaling of the addend, as that has already been done by the
 274   // caller.
 275 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 276   template<typename Derived> struct FetchAndAdd;
 277   template<typename Derived> struct AddAndFetch;
 278 private:
 279 
 280   // Support for platforms that implement some variants of add using a
 281   // (typically out of line) non-template helper function.  The
 282   // generic arguments passed to PlatformAdd need to be translated to
 283   // the appropriate type for the helper function, the helper function
 284   // invoked on the translated arguments, and the result translated
 285   // back.  Type is the parameter / return type of the helper
 286   // function.  No scaling of add_value is performed when D is a pointer
 287   // type, so this function can be used to implement the support function
 288   // required by AddAndFetch.
 289   template<typename Type, typename Fn, typename I, typename D>
 290   static D add_using_helper(Fn fn, I add_value, D volatile* dest);
 291 
 292   // Dispatch handler for cmpxchg.  Provides type-based validity
 293   // checking and limited conversions around calls to the
 294   // platform-specific implementation layer provided by
 295   // PlatformCmpxchg.
 296   template<typename T, typename D, typename U, typename Enable = void>
 297   struct CmpxchgImpl;
 298 
 299   // Platform-specific implementation of cmpxchg.  Support for sizes
 300   // of 1, 4, and 8 are required.  The class is a function object that
 301   // must be default constructable, with these requirements:
 302   //
 303   // - dest is of type T*.
 304   // - exchange_value and compare_value are of type T.
 305   // - order is of type atomic_memory_order.
 306   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
 307   //
 308   // Then
 309   //   platform_cmpxchg(exchange_value, dest, compare_value, order)
 310   // must be a valid expression, returning a result convertible to T.
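
To make the PlatformAdd and helper-base protocol above concrete, here is a rough sketch (assuming a GCC-style toolchain; it is not the implementation of any particular port) that derives from AddAndFetch and supplies only the required support function; argument conversion and pointer scaling are handled by the base class:

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
    STATIC_ASSERT(byte_size == sizeof(I));
    STATIC_ASSERT(byte_size == sizeof(D));
    (void)order;  // this sketch always over-satisfies the requested ordering
    // Sequentially consistent RMW; for pointer operands the GCC builtin adds
    // bytes without scaling, matching the contract described above.
    return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
  }
};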


 500 // For increased safety, the default implementation only allows
 501 // storing types that are pointer sized or smaller. If a platform still
 502 // supports wide atomics, then it has to use specialization
 503 // of Atomic::PlatformStore for that wider size class.
 504 template<size_t byte_size>
 505 struct Atomic::PlatformStore {
 506   template<typename T>
 507   void operator()(T volatile* dest,
 508                   T new_value) const {
 509     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
 510     (void)const_cast<T&>(*dest = new_value);
 511   }
 512 };
 513 
 514 // Define FetchAndAdd and AddAndFetch helper classes before including
 515 // platform file, which may use these as base classes, requiring they
 516 // be complete.
 517 
 518 template<typename Derived>
 519 struct Atomic::FetchAndAdd {
 520   template<typename I, typename D>
 521   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
 522 };
 523 
 524 template<typename Derived>
 525 struct Atomic::AddAndFetch {
 526   template<typename I, typename D>
 527   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
 528 };
 529 
 530 template<typename D>
 531 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
 532   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 533   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
 534   Atomic::add(I(1), dest, order);
 535 }
 536 
 537 template<typename D>
 538 inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
 539   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 540   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
 541   // Assumes two's complement integer representation.
 542   #pragma warning(suppress: 4146)
 543   Atomic::add(I(-1), dest, order);
 544 }
 545 
 546 template<typename I, typename D>
 547 inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
 548   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 549   STATIC_ASSERT(IsIntegral<I>::value);
 550   // If D is a pointer type, use [u]intptr_t as the addend type,
 551   // matching signedness of I.  Otherwise, use D as the addend type.
 552   typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
 553   typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
 554   // Only allow conversions that can't change the value.
 555   STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
 556   STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
 557   AddendType addend = sub_value;
 558   // Assumes two's complement integer representation.
 559   #pragma warning(suppress: 4146) // In case AddendType is not signed.
 560   return Atomic::add(-addend, dest, order);
 561 }
 562 
 563 // Define the class before including platform file, which may specialize
 564 // the operator definition.  No generic definitions of specializations
 565 // of the operator template are provided, nor are there any generic
 566 // specializations of the class.  The platform file is responsible for
 567 // providing those.
 568 template<size_t byte_size>
 569 struct Atomic::PlatformCmpxchg {
 570   template<typename T>
 571   T operator()(T exchange_value,
 572                T volatile* dest,
 573                T compare_value,
 574                atomic_memory_order order) const;
 575 };
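
As a hedged illustration of the platform_cmpxchg requirements described earlier (a sketch built on the GCC __sync builtin also mentioned near the end of this file, not the definition used by any particular port), a 4-byte specialization of the operator template could look like this:

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  // Full-barrier builtin, at least as strong as any atomic_memory_order;
  // returns the value of *dest observed before the operation.
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}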
 576 
 577 // Define the class before including platform file, which may use this
 578 // as a base class, requiring it be complete.  The definition is later
 579 // in this file, near the other definitions related to cmpxchg.
 580 struct Atomic::CmpxchgByteUsingInt {


 661 
 662 template<size_t byte_size, ScopedFenceType type>
 663 struct Atomic::PlatformOrderedStore {
 664   template <typename T>
 665   void operator()(volatile T* p, T v) const {
 666     ScopedFence<type> f((void*)p);
 667     Atomic::store(p, v);
 668   }
 669 };
 670 
 671 template <typename D, typename T>
 672 inline void Atomic::release_store(volatile D* p, T v) {
 673   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 674 }
 675 
 676 template <typename D, typename T>
 677 inline void Atomic::release_store_fence(volatile D* p, T v) {
 678   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 679 }
 680 
 681 template<typename I, typename D>
 682 inline D Atomic::add(I add_value, D volatile* dest,
 683                      atomic_memory_order order) {
 684   return AddImpl<I, D>()(add_value, dest, order);
 685 }
 686 
 687 template<typename I, typename D>
 688 struct Atomic::AddImpl<
 689   I, D,
 690   typename EnableIf<IsIntegral<I>::value &&
 691                     IsIntegral<D>::value &&
 692                     (sizeof(I) <= sizeof(D)) &&
 693                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 694 {
 695   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
 696     D addend = add_value;
 697     return PlatformAdd<sizeof(D)>()(addend, dest, order);
 698   }
 699 };
 700 
 701 template<typename I, typename P>
 702 struct Atomic::AddImpl<
 703   I, P*,
 704   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 705 {
 706   P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
 707     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
 708     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
 709     typedef typename Conditional<IsSigned<I>::value,
 710                                  intptr_t,
 711                                  uintptr_t>::type CI;
 712     CI addend = add_value;
 713     return PlatformAdd<sizeof(P*)>()(addend, dest, order);
 714   }
 715 };
 716 
 717 template<typename Derived>
 718 template<typename I, typename D>
 719 inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
 720                                                   atomic_memory_order order) const {
 721   I addend = add_value;
 722   // If D is a pointer type P*, scale by sizeof(P).
 723   if (IsPointer<D>::value) {
 724     addend *= sizeof(typename RemovePointer<D>::type);
 725   }
 726   D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
 727   return old + add_value;
 728 }
 729 
 730 template<typename Derived>
 731 template<typename I, typename D>
 732 inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
 733                                                   atomic_memory_order order) const {
 734   // If D is a pointer type P*, scale by sizeof(P).
 735   if (IsPointer<D>::value) {
 736     add_value *= sizeof(typename RemovePointer<D>::type);
 737   }
 738   return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
 739 }
 740 
 741 template<typename Type, typename Fn, typename I, typename D>
 742 inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
 743   return PrimitiveConversions::cast<D>(
 744     fn(PrimitiveConversions::cast<Type>(add_value),
 745        reinterpret_cast<Type volatile*>(dest)));
 746 }
 747 
 748 template<typename T, typename D, typename U>
 749 inline D Atomic::cmpxchg(T exchange_value,
 750                          D volatile* dest,
 751                          U compare_value,
 752                          atomic_memory_order order) {
 753   return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
 754 }
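
A hedged usage sketch for cmpxchg() and the replace_if_null() helper defined just below (the class and fields are hypothetical): claim a pointer slot exactly once, and publish a state transition only if the expected old value is still in place:

// Illustrative only.
class Foo;
static Foo* volatile _instance;
static volatile int  _state;

bool claim(Foo* f) {
  // True only for the thread that installs f into the NULL slot.
  return Atomic::replace_if_null(f, &_instance);
}

bool try_advance(int from, int to) {
  // cmpxchg returns the previous value; the transition won iff it equals 'from'.
  return Atomic::cmpxchg(to, &_state, from) == from;
}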
 755 
 756 template<typename T, typename D>
 757 inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
 758                                     atomic_memory_order order) {
 759   // Presently using a trivial implementation in terms of cmpxchg.
 760   // Consider adding platform support, to permit the use of compiler
 761   // intrinsics like gcc's __sync_bool_compare_and_swap.
 762   D* expected_null = NULL;

The regions above show the file before the change; the same regions repeat below in the proposed version, where add() and the PlatformAdd support functions take the destination first and the add value second.

  83   inline static void store(volatile D* dest, T store_value);
  84 
  85   template <typename D, typename T>
  86   inline static void release_store(volatile D* dest, T store_value);
  87 
  88   template <typename D, typename T>
  89   inline static void release_store_fence(volatile D* dest, T store_value);
  90 
  91   // Atomically load from a location
  92   // The type T must be either a pointer type, an integral/enum type,
  93   // or a type that is primitive convertible using PrimitiveConversions.
  94   template<typename T>
  95   inline static T load(const volatile T* dest);
  96 
  97   template <typename T>
  98   inline static T load_acquire(const volatile T* dest);
  99 
 100   // Atomically add to a location. Returns updated value. add*() provide:
 101   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 102 
 103   template<typename D, typename I>
 104   inline static D add(D volatile* dest, I add_value,
 105                       atomic_memory_order order = memory_order_conservative);
 106 
 107   template<typename I, typename D>
 108   inline static D sub(I sub_value, D volatile* dest,
 109                       atomic_memory_order order = memory_order_conservative);
 110 
 111   // Atomically increment a location. inc() provides:
 112   // <fence> increment-dest <membar StoreLoad|StoreStore>
 113   // The type D may be either a pointer type, or an integral
 114   // type. If it is a pointer type, then the increment is
 115   // scaled to the size of the type pointed to by the pointer.
 116   template<typename D>
 117   inline static void inc(D volatile* dest,
 118                          atomic_memory_order order = memory_order_conservative);
 119 
 120   // Atomically decrement a location. dec() provides:
 121   // <fence> decrement-dest <membar StoreLoad|StoreStore>
 122   // The type D may be either a pointer type, or an integral
 123   // type. If it is a pointer type, then the decrement is
 124   // scaled to the size of the type pointed to by the pointer.
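
These declarations mirror the earlier block with the destination now first; the earlier hypothetical usage sketch then becomes:

static volatile int  _pending;   // hypothetical, as before
static int* volatile _cursor;

void example_dest_first() {
  Atomic::inc(&_pending);                     // unchanged
  int updated = Atomic::add(&_pending, 4);    // destination first, operand second
  Atomic::inc(&_cursor);                      // still scaled by sizeof(int)
  (void)updated;
}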


 207   // - dest is of type T*, an integral, enum or pointer type, or
 208   //   T is convertible to a primitive type using PrimitiveConversions
 209   // - platform_load is an object of type PlatformLoad<sizeof(T)>.
 210   //
 211   // Then
 212   //   platform_load(src)
 213   // must be a valid expression, returning a result convertible to T.
 214   //
 215   // The default implementation is a volatile load. If a platform
 216   // requires more, e.g. for 64-bit loads, a specialization is required.
 217   template<size_t byte_size> struct PlatformLoad;
 218 
 219   // Give platforms a variation point to specialize.
 220   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
 221   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
 222 
 223 private:
 224   // Dispatch handler for add.  Provides type-based validity checking
 225   // and limited conversions around calls to the platform-specific
 226   // implementation layer provided by PlatformAdd.
 227   template<typename D, typename I, typename Enable = void>
 228   struct AddImpl;
 229 
 230   // Platform-specific implementation of add.  Support for sizes of 4
 231   // bytes and (if different) pointer size bytes are required.  The
 232   // class is a function object that must be default constructable,
 233   // with these requirements:
 234   //
 235   // - dest is of type D*, an integral or pointer type.
 236   // - add_value is of type I, an integral type.
 237   // - sizeof(I) == sizeof(D).
 238   // - if D is an integral type, I == D.
 239   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
 240   //
 241   // Then
 242   //   platform_add(dest, add_value)
 243   // must be a valid expression, returning a result convertible to D.
 244   //
 245   // No definition is provided; all platforms must explicitly define
 246   // this class and any needed specializations.
 247   template<size_t byte_size> struct PlatformAdd;
 248 
 249   // Helper base classes for defining PlatformAdd.  To use, define
 250   // PlatformAdd or a specialization that derives from one of these,
 251   // and include in the PlatformAdd definition the support function
 252   // (described below) required by the base class.
 253   //
 254   // These classes implement the required function object protocol for
 255   // PlatformAdd, using a support function template provided by the
 256   // derived class.  Let add_value (of type I) and dest (of type D) be
 257   // the arguments the object is called with.  If D is a pointer type
 258   // P*, then let addend (of type I) be add_value * sizeof(P);
 259   // otherwise, addend is add_value.
 260   //
 261   // FetchAndAdd requires the derived class to provide
 262   //   fetch_and_add(dest, addend)
 263   // atomically adding addend to the value of dest, and returning the
 264   // old value.
 265   //
 266   // AddAndFetch requires the derived class to provide
 267   //   add_and_fetch(dest, addend)
 268   // atomically adding addend to the value of dest, and returning the
 269   // new value.
 270   //
 271   // When D is a pointer type P*, both fetch_and_add and add_and_fetch
 272   // treat it as if it were a uintptr_t; they do not perform any
 273   // scaling of the addend, as that has already been done by the
 274   // caller.
 275 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 276   template<typename Derived> struct FetchAndAdd;
 277   template<typename Derived> struct AddAndFetch;
 278 private:
 279 
 280   // Support for platforms that implement some variants of add using a
 281   // (typically out of line) non-template helper function.  The
 282   // generic arguments passed to PlatformAdd need to be translated to
 283   // the appropriate type for the helper function, the helper function
 284   // invoked on the translated arguments, and the result translated
 285   // back.  Type is the parameter / return type of the helper
 286   // function.  No scaling of add_value is performed when D is a pointer
 287   // type, so this function can be used to implement the support function
 288   // required by AddAndFetch.
 289   template<typename Type, typename Fn, typename D, typename I>
 290   static D add_using_helper(Fn fn, D volatile* dest, I add_value);
 291 
 292   // Dispatch handler for cmpxchg.  Provides type-based validity
 293   // checking and limited conversions around calls to the
 294   // platform-specific implementation layer provided by
 295   // PlatformCmpxchg.
 296   template<typename T, typename D, typename U, typename Enable = void>
 297   struct CmpxchgImpl;
 298 
 299   // Platform-specific implementation of cmpxchg.  Support for sizes
 300   // of 1, 4, and 8 are required.  The class is a function object that
 301   // must be default constructable, with these requirements:
 302   //
 303   // - dest is of type T*.
 304   // - exchange_value and compare_value are of type T.
 305   // - order is of type atomic_memory_order.
 306   // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
 307   //
 308   // Then
 309   //   platform_cmpxchg(exchange_value, dest, compare_value, order)
 310   // must be a valid expression, returning a result convertible to T.
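
Under the reordered protocol above, a hedged PlatformAdd sketch (again assuming GCC builtins, not any particular port) can derive from FetchAndAdd and supply a dest-first fetch_and_add that returns the old value:

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    STATIC_ASSERT(byte_size == sizeof(I));
    STATIC_ASSERT(byte_size == sizeof(D));
    (void)order;  // this sketch always over-satisfies the requested ordering
    // Returns the old value; pointer operands are treated as uintptr_t
    // (no scaling), as required by the FetchAndAdd contract.
    return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
  }
};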


 500 // For increased safety, the default implementation only allows
 501 // storing types that are pointer sized or smaller. If a platform still
 502 // supports wide atomics, then it has to use specialization
 503 // of Atomic::PlatformStore for that wider size class.
 504 template<size_t byte_size>
 505 struct Atomic::PlatformStore {
 506   template<typename T>
 507   void operator()(T volatile* dest,
 508                   T new_value) const {
 509     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
 510     (void)const_cast<T&>(*dest = new_value);
 511   }
 512 };
 513 
 514 // Define FetchAndAdd and AddAndFetch helper classes before including
 515 // platform file, which may use these as base classes, requiring they
 516 // be complete.
 517 
 518 template<typename Derived>
 519 struct Atomic::FetchAndAdd {
 520   template<typename D, typename I>
 521   D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 522 };
 523 
 524 template<typename Derived>
 525 struct Atomic::AddAndFetch {
 526   template<typename D, typename I>
 527   D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
 528 };
 529 
 530 template<typename D>
 531 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
 532   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 533   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
 534   Atomic::add(dest, I(1), order);
 535 }
 536 
 537 template<typename D>
 538 inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
 539   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 540   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
 541   // Assumes two's complement integer representation.
 542   #pragma warning(suppress: 4146)
 543   Atomic::add(dest, I(-1), order);
 544 }
 545 
 546 template<typename I, typename D>
 547 inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
 548   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 549   STATIC_ASSERT(IsIntegral<I>::value);
 550   // If D is a pointer type, use [u]intptr_t as the addend type,
 551   // matching signedness of I.  Otherwise, use D as the addend type.
 552   typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
 553   typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
 554   // Only allow conversions that can't change the value.
 555   STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
 556   STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
 557   AddendType addend = sub_value;
 558   // Assumes two's complement integer representation.
 559   #pragma warning(suppress: 4146) // In case AddendType is not signed.
 560   return Atomic::add(dest, -addend, order);
 561 }
 562 
 563 // Define the class before including platform file, which may specialize
 564 // the operator definition.  No generic definitions of specializations
 565 // of the operator template are provided, nor are there any generic
 566 // specializations of the class.  The platform file is responsible for
 567 // providing those.
 568 template<size_t byte_size>
 569 struct Atomic::PlatformCmpxchg {
 570   template<typename T>
 571   T operator()(T exchange_value,
 572                T volatile* dest,
 573                T compare_value,
 574                atomic_memory_order order) const;
 575 };
 576 
 577 // Define the class before including platform file, which may use this
 578 // as a base class, requiring it be complete.  The definition is later
 579 // in this file, near the other definitions related to cmpxchg.
 580 struct Atomic::CmpxchgByteUsingInt {


 661 
 662 template<size_t byte_size, ScopedFenceType type>
 663 struct Atomic::PlatformOrderedStore {
 664   template <typename T>
 665   void operator()(volatile T* p, T v) const {
 666     ScopedFence<type> f((void*)p);
 667     Atomic::store(p, v);
 668   }
 669 };
 670 
 671 template <typename D, typename T>
 672 inline void Atomic::release_store(volatile D* p, T v) {
 673   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 674 }
 675 
 676 template <typename D, typename T>
 677 inline void Atomic::release_store_fence(volatile D* p, T v) {
 678   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 679 }
 680 
 681 template<typename D, typename I>
 682 inline D Atomic::add(D volatile* dest, I add_value,
 683                      atomic_memory_order order) {
 684   return AddImpl<D, I>()(dest, add_value, order);
 685 }
 686 
 687 template<typename D, typename I>
 688 struct Atomic::AddImpl<
 689   D, I,
 690   typename EnableIf<IsIntegral<I>::value &&
 691                     IsIntegral<D>::value &&
 692                     (sizeof(I) <= sizeof(D)) &&
 693                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 694 {
 695   D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
 696     D addend = add_value;
 697     return PlatformAdd<sizeof(D)>()(dest, addend, order);
 698   }
 699 };
 700 
 701 template<typename P, typename I>
 702 struct Atomic::AddImpl<
 703   P*, I,
 704   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 705 {
 706   P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
 707     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
 708     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
 709     typedef typename Conditional<IsSigned<I>::value,
 710                                  intptr_t,
 711                                  uintptr_t>::type CI;
 712     CI addend = add_value;
 713     return PlatformAdd<sizeof(P*)>()(dest, addend, order);
 714   }
 715 };
 716 
 717 template<typename Derived>
 718 template<typename D, typename I>
 719 inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
 720                                                   atomic_memory_order order) const {
 721   I addend = add_value;
 722   // If D is a pointer type P*, scale by sizeof(P).
 723   if (IsPointer<D>::value) {
 724     addend *= sizeof(typename RemovePointer<D>::type);
 725   }
 726   D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
 727   return old + add_value;
 728 }
 729 
 730 template<typename Derived>
 731 template<typename D, typename I>
 732 inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
 733                                                   atomic_memory_order order) const {
 734   // If D is a pointer type P*, scale by sizeof(P).
 735   if (IsPointer<D>::value) {
 736     add_value *= sizeof(typename RemovePointer<D>::type);
 737   }
 738   return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
 739 }
 740 
 741 template<typename Type, typename Fn, typename D, typename I>
 742 inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
 743   return PrimitiveConversions::cast<D>(
 744     fn(PrimitiveConversions::cast<Type>(add_value),
 745        reinterpret_cast<Type volatile*>(dest)));
 746 }
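
To illustrate add_using_helper, here is a sketch under stated assumptions (the out-of-line helper and the 4-byte-only specialization are hypothetical): the helper keeps the (add_value, dest) signature that add_using_helper passes through, and because no scaling happens here it satisfies the AddAndFetch support-function contract:

// Hypothetical out-of-line, non-template helper.
extern "C" int32_t hypothetical_atomic_add(int32_t add_value, int32_t volatile* dest);

template<>
struct Atomic::PlatformAdd<4>
  : Atomic::AddAndFetch<Atomic::PlatformAdd<4> >
{
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    (void)order;  // the helper is assumed to provide conservative ordering
    return add_using_helper<int32_t>(hypothetical_atomic_add, dest, add_value);
  }
};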
 747 
 748 template<typename T, typename D, typename U>
 749 inline D Atomic::cmpxchg(T exchange_value,
 750                          D volatile* dest,
 751                          U compare_value,
 752                          atomic_memory_order order) {
 753   return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
 754 }
 755 
 756 template<typename T, typename D>
 757 inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
 758                                     atomic_memory_order order) {
 759   // Presently using a trivial implementation in terms of cmpxchg.
 760   // Consider adding platform support, to permit the use of compiler
 761   // intrinsics like gcc's __sync_bool_compare_and_swap.
 762   D* expected_null = NULL;

