src/hotspot/share/runtime/atomic.hpp

  62   // be used from code that verifies they are available at runtime and
  63   // can provide an alternative action if not - see supports_cx8() for
  64   // a means to test availability.
  65 
  66   // The memory operations that are mentioned with each of the atomic
  67   // function families come from src/hotspot/share/runtime/orderAccess.hpp,
  68   // e.g., <fence> is described in that file and is implemented by the
  69   // OrderAccess::fence() function. See that file for the gory details
  70   // on the Memory Access Ordering Model.
  71 
  72   // All of the atomic operations that imply a read-modify-write action
  73   // guarantee a two-way memory barrier across that operation. Historically
  74   // these semantics reflect the strength of atomic operations that are
  75   // provided on SPARC/X86. We assume that strength is necessary unless
  76   // we can prove that a weaker form is sufficiently safe.
  77 
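As a concrete illustration of that conservative default, a minimal usage sketch (the counter and function names are hypothetical):

#include "runtime/atomic.hpp"

static volatile size_t _event_count = 0;   // hypothetical shared counter

size_t bump_event_count() {
  // add() is a read-modify-write operation; with the default
  // memory_order_conservative it is bracketed by a two-way barrier:
  //   <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  return Atomic::add((size_t)1, &_event_count);
}
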
  78   // Atomically store to a location
  79   // The type T must be either a pointer type convertible to or equal
  80   // to D, an integral/enum type equal to D, or a type equal to D that
  81   // is primitive convertible using PrimitiveConversions.
  82   template<typename T, typename D>
  83   inline static void store(T store_value, volatile D* dest);
  84 
  85   template <typename T, typename D>
  86   inline static void release_store(volatile D* dest, T store_value);
  87 
  88   template <typename T, typename D>
  89   inline static void release_store_fence(volatile D* dest, T store_value);
  90 
  91   // Atomically load from a location
  92   // The type T must be either a pointer type, an integral/enum type,
  93   // or a type that is primitive convertible using PrimitiveConversions.
  94   template<typename T>
  95   inline static T load(const volatile T* dest);
  96 
  97   template <typename T>
  98   inline static T load_acquire(const volatile T* dest);
  99 
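A minimal publish/consume sketch against these declarations (Foo, _ready and _shared are hypothetical; note that in this version of the interface store() takes the value first, while release_store() already takes the destination first):

#include "runtime/atomic.hpp"

struct Foo { int payload; };

static volatile int  _ready  = 0;      // hypothetical flag
static Foo* volatile _shared = NULL;   // hypothetical published pointer

void publish(Foo* f) {
  Atomic::store(1, &_ready);             // plain store: value first, destination second
  Atomic::release_store(&_shared, f);    // writes to *f become visible before the pointer
}

Foo* consume() {
  return Atomic::load_acquire(&_shared); // pairs with the release above
}
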
 100   // Atomically add to a location. Returns updated value. add*() provide:
 101   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 102 
 103   template<typename I, typename D>
 104   inline static D add(I add_value, D volatile* dest,
 105                       atomic_memory_order order = memory_order_conservative);
 106 
 107   template<typename I, typename D>
 108   inline static D sub(I sub_value, D volatile* dest,


 151   // with exchange_value if the comparison succeeded.  Returns true if
 152   // the comparison succeeded and the exchange occurred.  This is
 153   // often used as part of lazy initialization, as a lock-free
 154   // alternative to the Double-Checked Locking Pattern.
 155   template<typename T, typename D>
 156   inline static bool replace_if_null(T* value, D* volatile* dest,
 157                                      atomic_memory_order order = memory_order_conservative);
 158 
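A hedged sketch of the lazy-initialization pattern the comment describes (Table and _table are hypothetical; allocation and cleanup are simplified):

#include "runtime/atomic.hpp"

struct Table { /* ... */ };
static Table* volatile _table = NULL;    // hypothetical lazily-created singleton

Table* get_table() {
  Table* t = Atomic::load_acquire(&_table);
  if (t == NULL) {
    t = new Table();
    // Install our table only if the slot is still NULL; this is the
    // lock-free alternative to double-checked locking mentioned above.
    if (!Atomic::replace_if_null(t, &_table)) {
      delete t;                          // lost the race; use the winner's table
      t = Atomic::load_acquire(&_table);
    }
  }
  return t;
}
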
 159 private:
 160 WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
 161   // Test whether From is implicitly convertible to To.
 162   // From and To must be pointer types.
 163   // Note: Provides the limited subset of C++11 std::is_convertible
 164   // that is needed here.
 165   template<typename From, typename To> struct IsPointerConvertible;
 166 
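A sketch of how such a limited convertibility trait can be built with the classic sizeof/overload trick; this is an illustration, not necessarily the exact HotSpot definition:

template<typename From, typename To> struct IsPtrConvertibleSketch;

template<typename From, typename To>
struct IsPtrConvertibleSketch<From*, To*> {
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);                  // viable only if From* converts to To*
  static no  test(...);                  // fallback otherwise
  static From* test_value();

  static const bool value = (sizeof(test(test_value())) == sizeof(yes));
};
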
 167 protected:
 168   // Dispatch handler for store.  Provides type-based validity
 169   // checking and limited conversions around calls to the platform-
 170   // specific implementation layer provided by PlatformOp.
 171   template<typename T, typename D, typename PlatformOp, typename Enable = void>
 172   struct StoreImpl;
 173 
 174   // Platform-specific implementation of store.  Support for sizes
  175   // of 1, 2, 4, and (if different) pointer size bytes is required.
  176   // The class is a function object that must be default constructible,
 177   // with these requirements:
 178   //
 179   // either:
  180   // - dest is of type D*, where D is an integral, enum or pointer type.
  181   // - new_value is of type T, where T is an integral, enum or pointer
  182   //   type equal to D, or a pointer type convertible to D.
 183   // or:
 184   // - T and D are the same and are primitive convertible using PrimitiveConversions
 185   // and either way:
 186   // - platform_store is an object of type PlatformStore<sizeof(T)>.
 187   //
 188   // Then
 189   //   platform_store(new_value, dest)
 190   // must be a valid expression.
 191   //


 433 // supports wide atomics, then it has to use specialization
 434 // of Atomic::PlatformLoad for that wider size class.
 435 template<size_t byte_size>
 436 struct Atomic::PlatformLoad {
 437   template<typename T>
 438   T operator()(T const volatile* dest) const {
 439     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
 440     return *dest;
 441   }
 442 };
 443 
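For comparison, a hedged sketch of what a port needing 8-byte loads on a 32-bit target might provide (__atomic_load_n is the GCC/Clang builtin; a real port may use different primitives):

template<>
struct Atomic::PlatformLoad<8> {
  template<typename T>
  T operator()(T const volatile* src) const {
    STATIC_ASSERT(8 == sizeof(T));
    return __atomic_load_n(src, __ATOMIC_RELAXED);   // single atomic 64-bit load
  }
};
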
 444 // Handle store for integral and enum types.
 445 //
 446 // All the involved types must be identical.
 447 template<typename T, typename PlatformOp>
 448 struct Atomic::StoreImpl<
 449   T, T,
 450   PlatformOp,
 451   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 452 {
 453   void operator()(T new_value, T volatile* dest) const {
 454     // Forward to the platform handler for the size of T.
 455     PlatformOp()(new_value, dest);
 456   }
 457 };
 458 
 459 // Handle store for pointer types.
 460 //
 461 // The new_value must be implicitly convertible to the
 462 // destination's type; it must be type-correct to store the
 463 // new_value in the destination.
 464 template<typename T, typename D, typename PlatformOp>
 465 struct Atomic::StoreImpl<
 466   T*, D*,
 467   PlatformOp,
 468   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 469 {
 470   void operator()(T* new_value, D* volatile* dest) const {
 471     // Allow derived to base conversion, and adding cv-qualifiers.
 472     D* value = new_value;
 473     PlatformOp()(value, dest);
 474   }
 475 };
 476 
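An illustration of the conversions this pointer specialization accepts, using the value-first argument order of this version (Base, Derived and _obj are hypothetical):

struct Base {};
struct Derived : public Base {};

static Base* volatile _obj = NULL;

void example(Derived* d) {
  Atomic::store(d, &_obj);     // OK: Derived* converts implicitly to Base*
  // Atomic::store(5, &_obj);  // would not compile: no StoreImpl matches
}
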
 477 // Handle store for types that have a translator.
 478 //
 479 // All the involved types must be identical.
 480 //
 481 // This translates the original call into a call on the decayed
 482 // arguments.
 483 template<typename T, typename PlatformOp>
 484 struct Atomic::StoreImpl<
 485   T, T,
 486   PlatformOp,
 487   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 488 {
 489   void operator()(T new_value, T volatile* dest) const {
 490     typedef PrimitiveConversions::Translate<T> Translator;
 491     typedef typename Translator::Decayed Decayed;
 492     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 493     PlatformOp()(Translator::decay(new_value),
 494                  reinterpret_cast<Decayed volatile*>(dest));
 495   }
 496 };
 497 
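A hedged sketch of what registering a translator looks like (MyHandle is hypothetical, and the exact members PrimitiveConversions expects may differ between JDK versions):

class MyHandle {
  void* _raw;
 public:
  explicit MyHandle(void* raw) : _raw(raw) {}
  void* raw() const { return _raw; }
};

template<>
struct PrimitiveConversions::Translate<MyHandle> : public TrueType {
  typedef MyHandle Value;
  typedef void*    Decayed;

  static Decayed decay(Value x)     { return x.raw(); }
  static Value   recover(Decayed x) { return MyHandle(x); }
};

With such a specialization in place, the StoreImpl above decays the handle to its underlying representation and forwards to the platform layer through a reinterpreted destination pointer.
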
 498 // Default implementation of atomic store if a specific platform
 499 // does not provide a specialization for a certain size class.
 500 // For increased safety, the default implementation only allows
 501 // storing types that are pointer sized or smaller. If a platform still
 502 // supports wide atomics, then it has to use specialization
 503 // of Atomic::PlatformStore for that wider size class.
 504 template<size_t byte_size>
 505 struct Atomic::PlatformStore {
 506   template<typename T>
 507   void operator()(T new_value,
 508                   T volatile* dest) const {
 509     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
 510     (void)const_cast<T&>(*dest = new_value);
 511   }
 512 };
 513 
 514 // Define FetchAndAdd and AddAndFetch helper classes before including
 515 // the platform file, which may use these as base classes, requiring they
 516 // be complete.
 517 
 518 template<typename Derived>
 519 struct Atomic::FetchAndAdd {
 520   template<typename I, typename D>
 521   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
 522 };
 523 
 524 template<typename Derived>
 525 struct Atomic::AddAndFetch {
 526   template<typename I, typename D>
 527   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
 528 };
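A hedged sketch of how a port typically uses these helpers: PlatformAdd derives from whichever helper matches its primitive, and (by assumption here) the helper's operator() calls back into a member such as add_and_fetch() on the derived class:

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  // Assumed callback invoked by AddAndFetch::operator(); returns the
  // updated value. A FetchAndAdd-based port would instead supply a
  // member returning the old value.
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
};
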


 637 
 638 template<typename T>
 639 inline T Atomic::load(const volatile T* dest) {
 640   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
 641 }
 642 
 643 template<size_t byte_size, ScopedFenceType type>
 644 struct Atomic::PlatformOrderedLoad {
 645   template <typename T>
 646   T operator()(const volatile T* p) const {
 647     ScopedFence<type> f((void*)p);
 648     return Atomic::load(p);
 649   }
 650 };
 651 
 652 template <typename T>
 653 inline T Atomic::load_acquire(const volatile T* p) {
 654   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 655 }
 656 
 657 template<typename T, typename D>
 658 inline void Atomic::store(T store_value, volatile D* dest) {
 659   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
 660 }
 661 
 662 template<size_t byte_size, ScopedFenceType type>
 663 struct Atomic::PlatformOrderedStore {
 664   template <typename T>
 665   void operator()(T v, volatile T* p) const {
 666     ScopedFence<type> f((void*)p);
 667     Atomic::store(v, p);
 668   }
 669 };
 670 
 671 template <typename T, typename D>
 672 inline void Atomic::release_store(volatile D* p, T v) {
 673   StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
 674 }
 675 
 676 template <typename T, typename D>
 677 inline void Atomic::release_store_fence(volatile D* p, T v) {
 678   StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
 679 }
 680 
 681 template<typename I, typename D>
 682 inline D Atomic::add(I add_value, D volatile* dest,
 683                      atomic_memory_order order) {
 684   return AddImpl<I, D>()(add_value, dest, order);
 685 }
 686 
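Some consequences of the constraints in the AddImpl specialization that follows (the counters are hypothetical):

static volatile size_t   _unsigned_ctr = 0;
static volatile intptr_t _signed_ctr   = 0;

void add_examples() {
  Atomic::add((size_t)16, &_unsigned_ctr);   // OK: same signedness, sizes match
  Atomic::add((intptr_t)-1, &_signed_ctr);   // OK: adding a negative value
  // Atomic::add(1, &_unsigned_ctr);         // rejected: int is signed, size_t is not
}
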
 687 template<typename I, typename D>
 688 struct Atomic::AddImpl<
 689   I, D,
 690   typename EnableIf<IsIntegral<I>::value &&
 691                     IsIntegral<D>::value &&
 692                     (sizeof(I) <= sizeof(D)) &&
 693                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 694 {
 695   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
 696     D addend = add_value;
 697     return PlatformAdd<sizeof(D)>()(addend, dest, order);
 698   }




  62   // be used from code that verifies they are available at runtime and
  63   // can provide an alternative action if not - see supports_cx8() for
  64   // a means to test availability.
  65 
  66   // The memory operations that are mentioned with each of the atomic
  67   // function families come from src/hotspot/share/runtime/orderAccess.hpp,
  68   // e.g., <fence> is described in that file and is implemented by the
  69   // OrderAccess::fence() function. See that file for the gory details
  70   // on the Memory Access Ordering Model.
  71 
  72   // All of the atomic operations that imply a read-modify-write action
  73   // guarantee a two-way memory barrier across that operation. Historically
  74   // these semantics reflect the strength of atomic operations that are
  75   // provided on SPARC/X86. We assume that strength is necessary unless
  76   // we can prove that a weaker form is sufficiently safe.
  77 
  78   // Atomically store to a location
  79   // The type T must be either a pointer type convertible to or equal
  80   // to D, an integral/enum type equal to D, or a type equal to D that
  81   // is primitive convertible using PrimitiveConversions.
  82   template<typename D, typename T>
  83   inline static void store(volatile D* dest, T store_value);
  84 
  85   template <typename D, typename T>
  86   inline static void release_store(volatile D* dest, T store_value);
  87 
  88   template <typename D, typename T>
  89   inline static void release_store_fence(volatile D* dest, T store_value);
  90 
  91   // Atomically load from a location
  92   // The type T must be either a pointer type, an integral/enum type,
  93   // or a type that is primitive convertible using PrimitiveConversions.
  94   template<typename T>
  95   inline static T load(const volatile T* dest);
  96 
  97   template <typename T>
  98   inline static T load_acquire(const volatile T* dest);
  99 
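In this updated version of the interface the destination comes first for store() as well; a minimal sketch of the reordered calls (the variables are hypothetical):

#include "runtime/atomic.hpp"

struct Foo { int payload; };

static volatile int  _ready  = 0;
static Foo* volatile _shared = NULL;

void publish(Foo* f) {
  Atomic::store(&_ready, 1);             // destination first in the new order
  Atomic::release_store(&_shared, f);    // unchanged: already destination-first
}

Foo* consume() {
  return Atomic::load_acquire(&_shared);
}
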
 100   // Atomically add to a location. Returns updated value. add*() provide:
 101   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 102 
 103   template<typename I, typename D>
 104   inline static D add(I add_value, D volatile* dest,
 105                       atomic_memory_order order = memory_order_conservative);
 106 
 107   template<typename I, typename D>
 108   inline static D sub(I sub_value, D volatile* dest,


 151   // with exchange_value if the comparison succeeded.  Returns true if
 152   // the comparison succeeded and the exchange occurred.  This is
 153   // often used as part of lazy initialization, as a lock-free
 154   // alternative to the Double-Checked Locking Pattern.
 155   template<typename T, typename D>
 156   inline static bool replace_if_null(T* value, D* volatile* dest,
 157                                      atomic_memory_order order = memory_order_conservative);
 158 
 159 private:
 160 WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
 161   // Test whether From is implicitly convertible to To.
 162   // From and To must be pointer types.
 163   // Note: Provides the limited subset of C++11 std::is_convertible
 164   // that is needed here.
 165   template<typename From, typename To> struct IsPointerConvertible;
 166 
 167 protected:
 168   // Dispatch handler for store.  Provides type-based validity
 169   // checking and limited conversions around calls to the platform-
 170   // specific implementation layer provided by PlatformOp.
 171   template<typename D, typename T, typename PlatformOp, typename Enable = void>
 172   struct StoreImpl;
 173 
 174   // Platform-specific implementation of store.  Support for sizes
  175   // of 1, 2, 4, and (if different) pointer size bytes is required.
  176   // The class is a function object that must be default constructible,
 177   // with these requirements:
 178   //
 179   // either:
  180   // - dest is of type D*, where D is an integral, enum or pointer type.
  181   // - new_value is of type T, where T is an integral, enum or pointer
  182   //   type equal to D, or a pointer type convertible to D.
 183   // or:
 184   // - T and D are the same and are primitive convertible using PrimitiveConversions
 185   // and either way:
 186   // - platform_store is an object of type PlatformStore<sizeof(T)>.
 187   //
 188   // Then
  189   //   platform_store(dest, new_value)
 190   // must be a valid expression.
 191   //


 433 // supports wide atomics, then it has to use specialization
 434 // of Atomic::PlatformLoad for that wider size class.
 435 template<size_t byte_size>
 436 struct Atomic::PlatformLoad {
 437   template<typename T>
 438   T operator()(T const volatile* dest) const {
 439     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
 440     return *dest;
 441   }
 442 };
 443 
 444 // Handle store for integral and enum types.
 445 //
 446 // All the involved types must be identical.
 447 template<typename T, typename PlatformOp>
 448 struct Atomic::StoreImpl<
 449   T, T,
 450   PlatformOp,
 451   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 452 {
 453   void operator()(T volatile* dest, T new_value) const {
 454     // Forward to the platform handler for the size of T.
 455     PlatformOp()(dest, new_value);
 456   }
 457 };
 458 
 459 // Handle store for pointer types.
 460 //
 461 // The new_value must be implicitly convertible to the
 462 // destination's type; it must be type-correct to store the
 463 // new_value in the destination.
 464 template<typename D, typename T, typename PlatformOp>
 465 struct Atomic::StoreImpl<
 466   D*, T*,
 467   PlatformOp,
 468   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 469 {
 470   void operator()(D* volatile* dest, T* new_value) const {
 471     // Allow derived to base conversion, and adding cv-qualifiers.
 472     D* value = new_value;
 473     PlatformOp()(dest, value);
 474   }
 475 };
 476 
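The same derived-to-base conversion as in the earlier version, now with the destination-first argument order (Base, Derived and _obj are hypothetical):

struct Base {};
struct Derived : public Base {};

static Base* volatile _obj = NULL;

void example(Derived* d) {
  Atomic::store(&_obj, d);     // OK: Derived* converts implicitly to Base*
}
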
 477 // Handle store for types that have a translator.
 478 //
 479 // All the involved types must be identical.
 480 //
 481 // This translates the original call into a call on the decayed
 482 // arguments.
 483 template<typename T, typename PlatformOp>
 484 struct Atomic::StoreImpl<
 485   T, T,
 486   PlatformOp,
 487   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 488 {
 489   void operator()(T volatile* dest, T new_value) const {
 490     typedef PrimitiveConversions::Translate<T> Translator;
 491     typedef typename Translator::Decayed Decayed;
 492     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 493     PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
 494                  Translator::decay(new_value));
 495   }
 496 };
 497 
 498 // Default implementation of atomic store if a specific platform
 499 // does not provide a specialization for a certain size class.
 500 // For increased safety, the default implementation only allows
 501 // storing types that are pointer sized or smaller. If a platform still
 502 // supports wide atomics, then it has to use specialization
 503 // of Atomic::PlatformStore for that wider size class.
 504 template<size_t byte_size>
 505 struct Atomic::PlatformStore {
 506   template<typename T>
 507   void operator()(T volatile* dest,
 508                   T new_value) const {
 509     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
 510     (void)const_cast<T&>(*dest = new_value);
 511   }
 512 };
 513 
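Correspondingly, a hedged sketch of a wide-store specialization under the reordered interface (__atomic_store_n is the GCC/Clang builtin; real ports may differ):

template<>
struct Atomic::PlatformStore<8> {
  template<typename T>
  void operator()(T volatile* dest, T new_value) const {
    STATIC_ASSERT(8 == sizeof(T));
    __atomic_store_n(dest, new_value, __ATOMIC_RELAXED);   // single atomic 64-bit store
  }
};
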
 514 // Define FetchAndAdd and AddAndFetch helper classes before including
 515 // the platform file, which may use these as base classes, requiring they
 516 // be complete.
 517 
 518 template<typename Derived>
 519 struct Atomic::FetchAndAdd {
 520   template<typename I, typename D>
 521   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
 522 };
 523 
 524 template<typename Derived>
 525 struct Atomic::AddAndFetch {
 526   template<typename I, typename D>
 527   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
 528 };


 637 
 638 template<typename T>
 639 inline T Atomic::load(const volatile T* dest) {
 640   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
 641 }
 642 
 643 template<size_t byte_size, ScopedFenceType type>
 644 struct Atomic::PlatformOrderedLoad {
 645   template <typename T>
 646   T operator()(const volatile T* p) const {
 647     ScopedFence<type> f((void*)p);
 648     return Atomic::load(p);
 649   }
 650 };
 651 
 652 template <typename T>
 653 inline T Atomic::load_acquire(const volatile T* p) {
 654   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 655 }
 656 
 657 template<typename D, typename T>
 658 inline void Atomic::store(volatile D* dest, T store_value) {
 659   StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
 660 }
 661 
 662 template<size_t byte_size, ScopedFenceType type>
 663 struct Atomic::PlatformOrderedStore {
 664   template <typename T>
 665   void operator()(volatile T* p, T v) const {
 666     ScopedFence<type> f((void*)p);
 667     Atomic::store(p, v);
 668   }
 669 };
 670 
 671 template <typename D, typename T>
 672 inline void Atomic::release_store(volatile D* p, T v) {
 673   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 674 }
 675 
 676 template <typename D, typename T>
 677 inline void Atomic::release_store_fence(volatile D* p, T v) {
 678   StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 679 }
 680 
 681 template<typename I, typename D>
 682 inline D Atomic::add(I add_value, D volatile* dest,
 683                      atomic_memory_order order) {
 684   return AddImpl<I, D>()(add_value, dest, order);
 685 }
 686 
 687 template<typename I, typename D>
 688 struct Atomic::AddImpl<
 689   I, D,
 690   typename EnableIf<IsIntegral<I>::value &&
 691                     IsIntegral<D>::value &&
 692                     (sizeof(I) <= sizeof(D)) &&
 693                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 694 {
 695   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
 696     D addend = add_value;
 697     return PlatformAdd<sizeof(D)>()(addend, dest, order);
 698   }

