src/hotspot/share/runtime/atomic.hpp
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_RUNTIME_ATOMIC_HPP
  26 #define SHARE_RUNTIME_ATOMIC_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "metaprogramming/conditional.hpp"
  30 #include "metaprogramming/enableIf.hpp"
  31 #include "metaprogramming/isIntegral.hpp"
  32 #include "metaprogramming/isPointer.hpp"
  33 #include "metaprogramming/isSame.hpp"
  34 #include "metaprogramming/primitiveConversions.hpp"
  35 #include "metaprogramming/removeCV.hpp"
  36 #include "metaprogramming/removePointer.hpp"

  37 #include "utilities/align.hpp"
  38 #include "utilities/macros.hpp"
  39 
  40 enum atomic_memory_order {
  41   // The modes that align with C++11 are intended to
  42   // follow the same semantics.
  43   memory_order_relaxed = 0,
  44   memory_order_acquire = 2,
  45   memory_order_release = 3,
  46   memory_order_acq_rel = 4,
  47   // Strong two-way memory barrier.
  48   memory_order_conservative = 8
  49 };
  50 
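As an illustration of how these orders are consumed (a usage sketch, not part of the patch; the _hits counter is hypothetical), each read-modify-write entry point declared below accepts an optional atomic_memory_order argument and defaults to the conservative two-way barrier:

    static volatile int _hits = 0;          // hypothetical shared counter

    void count_hit() {
      // Full two-way barrier semantics (the default, memory_order_conservative).
      Atomic::add(1, &_hits);
      // A statistics-only counter could tolerate the weaker relaxed form.
      Atomic::add(1, &_hits, memory_order_relaxed);
    }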
  51 class Atomic : AllStatic {
  52 public:
  53   // Atomic operations on int64 types are not available on all 32-bit
  54   // platforms. If atomic ops on int64 are defined here they must only
  55   // be used from code that verifies they are available at runtime and
  56   // can provide an alternative action if not - see supports_cx8() for
  57   // a means to test availability.
  58 
  59   // The memory operations that are mentioned with each of the atomic
  60   // function families come from src/share/vm/runtime/orderAccess.hpp,
  61   // e.g., <fence> is described in that file and is implemented by the
  62   // OrderAccess::fence() function. See that file for the gory details
  63   // on the Memory Access Ordering Model.
  64 
  65   // All of the atomic operations that imply a read-modify-write action
  66   // guarantee a two-way memory barrier across that operation. Historically
  67   // these semantics reflect the strength of atomic operations that are
  68   // provided on SPARC/X86. We assume that strength is necessary unless
  69   // we can prove that a weaker form is sufficiently safe.
  70 
  71   // Atomically store to a location
  72   // The type T must be either a pointer type convertible to or equal
  73   // to D, an integral/enum type equal to D, or a type equal to D that
  74   // is primitive convertible using PrimitiveConversions.
  75   template<typename T, typename D>
  76   inline static void store(T store_value, volatile D* dest);
  77 
  78   // Atomically load from a location
  79   // The type T must be either a pointer type, an integral/enum type,
  80   // or a type that is primitive convertible using PrimitiveConversions.
  81   template<typename T>
  82   inline static T load(const volatile T* dest);
  83 
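A minimal usage sketch for the plain store/load pair declared above (the _state variable is hypothetical; these accesses are atomic but carry no ordering guarantees of their own):

    static volatile int _state = 0;         // hypothetical shared word

    void set_ready()  { Atomic::store(1, &_state); }
    int  read_state() { return Atomic::load(&_state); }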
  84   // Atomically add to a location. Returns updated value. add*() provide:
  85   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  86 
  87   template<typename I, typename D>
  88   inline static D add(I add_value, D volatile* dest,
  89                       atomic_memory_order order = memory_order_conservative);
  90 
  91   template<typename I, typename D>
  92   inline static D sub(I sub_value, D volatile* dest,
  93                       atomic_memory_order order = memory_order_conservative);
  94 
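Both add() and sub() return the updated value, which is what makes them usable for reference-count style code. A hedged sketch (the _refcount variable and helpers are hypothetical):

    static volatile int _refcount = 0;      // hypothetical reference count

    void grab()    { Atomic::add(1, &_refcount); }
    bool release() { return Atomic::sub(1, &_refcount) == 0; }   // true for the last releaser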
  95   // Atomically increment location. inc() provides:
  96   // <fence> increment-dest <membar StoreLoad|StoreStore>
  97   // The type D may be either a pointer type, or an integral
  98   // type. If it is a pointer type, then the increment is
  99   // scaled to the size of the type pointed to by the pointer.
 100   template<typename D>
 101   inline static void inc(D volatile* dest,
 102                          atomic_memory_order order = memory_order_conservative);
 103 
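For integral types inc() bumps the value by one; for pointer types the increment is scaled by the pointee size, mirroring ordinary pointer arithmetic. A sketch under that assumption (the _cursor variable is hypothetical):

    static int _buffer[16];
    static int* volatile _cursor = _buffer; // hypothetical fill cursor

    void advance() {
      // Moves _cursor forward by sizeof(int) bytes, i.e. one element.
      Atomic::inc(&_cursor);
    }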


 183   template<typename T, typename PlatformOp, typename Enable = void>
 184   struct LoadImpl;
 185 
 186   // Platform-specific implementation of load. Support for sizes of
 187   // 1, 2, 4 bytes and (if different) pointer size bytes is required.
 188   // The class is a function object that must be default
 189   // constructible, with these requirements:
 190   //
 191   // - dest is of type T*, an integral, enum or pointer type, or
 192   //   T is convertible to a primitive type using PrimitiveConversions
 193   // - platform_load is an object of type PlatformLoad<sizeof(T)>.
 194   //
 195   // Then
 196   //   platform_load(src)
 197   // must be a valid expression, returning a result convertible to T.
 198   //
 199   // The default implementation is a volatile load. If a platform
 200   // requires more for e.g. 64-bit loads, a specialization is required.
 201   template<size_t byte_size> struct PlatformLoad;
 202 
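To make that contract concrete, a platform that cannot issue a plain 64-bit volatile load might provide a specialization along these lines (purely illustrative, based on the gcc/clang __atomic builtins; this exact specialization is not part of the patch shown here):

    template<>
    struct Atomic::PlatformLoad<8> {
      template<typename T>
      T operator()(T const volatile* src) const {
        STATIC_ASSERT(8 == sizeof(T));
        // The compiler builtin guarantees an indivisible 8-byte load.
        return __atomic_load_n(src, __ATOMIC_RELAXED);
      }
    };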
 203 private:
 204   // Dispatch handler for add.  Provides type-based validity checking
 205   // and limited conversions around calls to the platform-specific
 206   // implementation layer provided by PlatformAdd.
 207   template<typename I, typename D, typename Enable = void>
 208   struct AddImpl;
 209 
 210   // Platform-specific implementation of add.  Support for sizes of 4
 211   // bytes and (if different) pointer size bytes is required.  The
 212   // class is a function object that must be default constructible,
 213   // with these requirements:
 214   //
 215   // - dest is of type D*, an integral or pointer type.
 216   // - add_value is of type I, an integral type.
 217   // - sizeof(I) == sizeof(D).
 218   // - if D is an integral type, I == D.
 219   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
 220   //
 221   // Then
 222   //   platform_add(add_value, dest)


 561   template<typename T>
 562   T operator()(T exchange_value,
 563                T volatile* dest,
 564                T compare_value,
 565                atomic_memory_order order) const;
 566 };
 567 
 568 // Define the class before including the platform file, which may specialize
 569 // the operator definition.  No generic definitions of specializations
 570 // of the operator template are provided, nor are there any generic
 571 // specializations of the class.  The platform file is responsible for
 572 // providing those.
 573 template<size_t byte_size>
 574 struct Atomic::PlatformXchg {
 575   template<typename T>
 576   T operator()(T exchange_value,
 577                T volatile* dest,
 578                atomic_memory_order order) const;
 579 };
 580 
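For orientation only, a platform file built on the gcc/clang __atomic builtins could satisfy this contract roughly as follows (an illustrative sketch, not a copy of any real port file):

    template<>
    struct Atomic::PlatformXchg<4> {
      template<typename T>
      T operator()(T exchange_value, T volatile* dest,
                   atomic_memory_order order) const {
        STATIC_ASSERT(4 == sizeof(T));
        (void)order;  // sketch: conservatively treat every order as seq_cst
        return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
      }
    };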
 581 // platform specific in-line definitions - must come before shared definitions
 582 
 583 #include OS_CPU_HEADER(atomic)
 584 
 585 // shared in-line definitions
 586 
 587 // size_t casts...
 588 #if (SIZE_MAX != UINTPTR_MAX)
 589 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 590 #endif
 591 
 592 template<typename T>
 593 inline T Atomic::load(const volatile T* dest) {
 594   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
 595 }
 596 
 597 template<typename T, typename D>
 598 inline void Atomic::store(T store_value, volatile D* dest) {
 599   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
 600 }
 601 
 602 template<typename I, typename D>
 603 inline D Atomic::add(I add_value, D volatile* dest,
 604                      atomic_memory_order order) {
 605   return AddImpl<I, D>()(add_value, dest, order);
 606 }
 607 
 608 template<typename I, typename D>
 609 struct Atomic::AddImpl<
 610   I, D,
 611   typename EnableIf<IsIntegral<I>::value &&
 612                     IsIntegral<D>::value &&
 613                     (sizeof(I) <= sizeof(D)) &&
 614                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 615 {
 616   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
 617     D addend = add_value;
 618     return PlatformAdd<sizeof(D)>()(addend, dest, order);
 619   }




  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_RUNTIME_ATOMIC_HPP
  26 #define SHARE_RUNTIME_ATOMIC_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "metaprogramming/conditional.hpp"
  30 #include "metaprogramming/enableIf.hpp"
  31 #include "metaprogramming/isIntegral.hpp"
  32 #include "metaprogramming/isPointer.hpp"
  33 #include "metaprogramming/isSame.hpp"
  34 #include "metaprogramming/primitiveConversions.hpp"
  35 #include "metaprogramming/removeCV.hpp"
  36 #include "metaprogramming/removePointer.hpp"
  37 #include "runtime/orderAccess.hpp"
  38 #include "utilities/align.hpp"
  39 #include "utilities/macros.hpp"
  40 
  41 enum atomic_memory_order {
  42   // The modes that align with C++11 are intended to
  43   // follow the same semantics.
  44   memory_order_relaxed = 0,
  45   memory_order_acquire = 2,
  46   memory_order_release = 3,
  47   memory_order_acq_rel = 4,
  48   // Strong two-way memory barrier.
  49   memory_order_conservative = 8
  50 };
  51 
  52 enum ScopedFenceType {
  53     X_ACQUIRE
  54   , RELEASE_X
  55   , RELEASE_X_FENCE
  56 };
  57 
  58 class Atomic : AllStatic {
  59 public:
  60   // Atomic operations on int64 types are not available on all 32-bit
  61   // platforms. If atomic ops on int64 are defined here they must only
  62   // be used from code that verifies they are available at runtime and
  63   // can provide an alternative action if not - see supports_cx8() for
  64   // a means to test availability.
  65 
  66   // The memory operations that are mentioned with each of the atomic
  67   // function families come from src/share/vm/runtime/orderAccess.hpp,
  68   // e.g., <fence> is described in that file and is implemented by the
  69   // OrderAccess::fence() function. See that file for the gory details
  70   // on the Memory Access Ordering Model.
  71 
  72   // All of the atomic operations that imply a read-modify-write action
  73   // guarantee a two-way memory barrier across that operation. Historically
  74   // these semantics reflect the strength of atomic operations that are
  75   // provided on SPARC/X86. We assume that strength is necessary unless
  76   // we can prove that a weaker form is sufficiently safe.
  77 
  78   // Atomically store to a location
  79   // The type T must be either a pointer type convertible to or equal
  80   // to D, an integral/enum type equal to D, or a type equal to D that
  81   // is primitive convertible using PrimitiveConversions.
  82   template<typename T, typename D>
  83   inline static void store(T store_value, volatile D* dest);
  84 
  85   template <typename T, typename D>
  86   inline static void release_store(volatile D* dest, T store_value);
  87 
  88   template <typename T, typename D>
  89   inline static void release_store_fence(volatile D* dest, T store_value);
  90 
  91   // Atomically load from a location
  92   // The type T must be either a pointer type, an integral/enum type,
  93   // or a type that is primitive convertible using PrimitiveConversions.
  94   template<typename T>
  95   inline static T load(const volatile T* dest);
  96 
  97   template <typename T>
  98   inline static T load_acquire(const volatile T* dest);
  99 
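The new release_store()/load_acquire() pair is the classic publication idiom: the writer's release ordering keeps the payload initialization from sinking below the pointer store, and the reader's acquire ordering keeps the dereference from floating above the pointer load. A hedged sketch (Payload and _published are hypothetical):

    struct Payload { int a; int b; };
    static Payload* volatile _published = NULL;   // hypothetical shared slot

    void publish(Payload* p) {
      p->a = 1; p->b = 2;                     // initialize first...
      Atomic::release_store(&_published, p);  // ...then publish with release ordering
    }

    void consume() {
      Payload* p = Atomic::load_acquire(&_published);
      if (p != NULL) {
        // The acquire pairs with the writer's release: a and b are visible.
        int sum = p->a + p->b;
        (void)sum;
      }
    }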
 100   // Atomically add to a location. Returns updated value. add*() provide:
 101   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 102 
 103   template<typename I, typename D>
 104   inline static D add(I add_value, D volatile* dest,
 105                       atomic_memory_order order = memory_order_conservative);
 106 
 107   template<typename I, typename D>
 108   inline static D sub(I sub_value, D volatile* dest,
 109                       atomic_memory_order order = memory_order_conservative);
 110 
 111   // Atomically increment location. inc() provides:
 112   // <fence> increment-dest <membar StoreLoad|StoreStore>
 113   // The type D may be either a pointer type, or an integral
 114   // type. If it is a pointer type, then the increment is
 115   // scaled to the size of the type pointed to by the pointer.
 116   template<typename D>
 117   inline static void inc(D volatile* dest,
 118                          atomic_memory_order order = memory_order_conservative);
 119 


 199   template<typename T, typename PlatformOp, typename Enable = void>
 200   struct LoadImpl;
 201 
 202   // Platform-specific implementation of load. Support for sizes of
 203   // 1, 2, 4 bytes and (if different) pointer size bytes is required.
 204   // The class is a function object that must be default
 205   // constructible, with these requirements:
 206   //
 207   // - dest is of type T*, an integral, enum or pointer type, or
 208   //   T is convertible to a primitive type using PrimitiveConversions
 209   // - platform_load is an object of type PlatformLoad<sizeof(T)>.
 210   //
 211   // Then
 212   //   platform_load(src)
 213   // must be a valid expression, returning a result convertible to T.
 214   //
 215   // The default implementation is a volatile load. If a platform
 216   // requires more for e.g. 64-bit loads, a specialization is required.
 217   template<size_t byte_size> struct PlatformLoad;
 218 
 219   // Give platforms a variation point to specialize.
 220   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
 221   template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
 222 
 223 private:
 224   // Dispatch handler for add.  Provides type-based validity checking
 225   // and limited conversions around calls to the platform-specific
 226   // implementation layer provided by PlatformAdd.
 227   template<typename I, typename D, typename Enable = void>
 228   struct AddImpl;
 229 
 230   // Platform-specific implementation of add.  Support for sizes of 4
 231   // bytes and (if different) pointer size bytes is required.  The
 232   // class is a function object that must be default constructible,
 233   // with these requirements:
 234   //
 235   // - dest is of type D*, an integral or pointer type.
 236   // - add_value is of type I, an integral type.
 237   // - sizeof(I) == sizeof(D).
 238   // - if D is an integral type, I == D.
 239   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
 240   //
 241   // Then
 242   //   platform_add(add_value, dest)


 581   template<typename T>
 582   T operator()(T exchange_value,
 583                T volatile* dest,
 584                T compare_value,
 585                atomic_memory_order order) const;
 586 };
 587 
 588 // Define the class before including the platform file, which may specialize
 589 // the operator definition.  No generic definitions of specializations
 590 // of the operator template are provided, nor are there any generic
 591 // specializations of the class.  The platform file is responsible for
 592 // providing those.
 593 template<size_t byte_size>
 594 struct Atomic::PlatformXchg {
 595   template<typename T>
 596   T operator()(T exchange_value,
 597                T volatile* dest,
 598                atomic_memory_order order) const;
 599 };
 600 
 601 template <ScopedFenceType T>
 602 class ScopedFenceGeneral: public StackObj {
 603  public:
 604   void prefix() {}
 605   void postfix() {}
 606 };
 607 
 608 // The following methods can be specialized using simple template specialization
 609 // in the platform specific files for optimization purposes. Otherwise the
 610 // generalized variant is used.
 611 
 612 template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
 613 template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
 614 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
 615 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
 616 
 617 template <ScopedFenceType T>
 618 class ScopedFence : public ScopedFenceGeneral<T> {
 619   void *const _field;
 620  public:
 621   ScopedFence(void *const field) : _field(field) { prefix(); }
 622   ~ScopedFence() { postfix(); }
 623   void prefix() { ScopedFenceGeneral<T>::prefix(); }
 624   void postfix() { ScopedFenceGeneral<T>::postfix(); }
 625 };
 626 
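Reading the specializations above together with the ScopedFence RAII wrapper, each ordered access effectively expands as follows (a schematic only, assuming no platform override of PlatformOrderedLoad/PlatformOrderedStore):

    // load_acquire(p):           val = Atomic::load(p);  OrderAccess::acquire();
    // release_store(p, v):       OrderAccess::release(); Atomic::store(v, p);
    // release_store_fence(p, v): OrderAccess::release(); Atomic::store(v, p); OrderAccess::fence();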
 627 // platform specific in-line definitions - must come before shared definitions
 628 
 629 #include OS_CPU_HEADER(atomic)
 630 
 631 // shared in-line definitions
 632 
 633 // size_t casts...
 634 #if (SIZE_MAX != UINTPTR_MAX)
 635 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 636 #endif
 637 
 638 template<typename T>
 639 inline T Atomic::load(const volatile T* dest) {
 640   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
 641 }
 642 
 643 template<size_t byte_size, ScopedFenceType type>
 644 struct Atomic::PlatformOrderedLoad {
 645   template <typename T>
 646   T operator()(const volatile T* p) const {
 647     ScopedFence<type> f((void*)p);
 648     return Atomic::load(p);
 649   }
 650 };
 651 
 652 template <typename T>
 653 inline T Atomic::load_acquire(const volatile T* p) {
 654   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 655 }
 656 
 657 template<typename T, typename D>
 658 inline void Atomic::store(T store_value, volatile D* dest) {
 659   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
 660 }
 661 
 662 template<size_t byte_size, ScopedFenceType type>
 663 struct Atomic::PlatformOrderedStore {
 664   template <typename T>
 665   void operator()(T v, volatile T* p) const {
 666     ScopedFence<type> f((void*)p);
 667     Atomic::store(v, p);
 668   }
 669 };
 670 
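Platforms can pre-empt these generic fence-wrapped forms by specializing the templates in their platform header, which is included earlier. Purely as an illustration of the shape such a specialization takes (not a copy of any real port file), a compiler-builtin based port might write:

    template<>
    struct Atomic::PlatformOrderedStore<4, RELEASE_X> {
      template<typename T>
      void operator()(T v, volatile T* p) const {
        STATIC_ASSERT(4 == sizeof(T));
        // Single intrinsic release store instead of a separate fence + store.
        __atomic_store_n(p, v, __ATOMIC_RELEASE);
      }
    };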
 671 template <typename T, typename D>
 672 inline void Atomic::release_store(volatile D* p, T v) {
 673   StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
 674 }
 675 
 676 template <typename T, typename D>
 677 inline void Atomic::release_store_fence(volatile D* p, T v) {
 678   StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
 679 }
 680 
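release_store_fence() is for the rarer case where the writer must also keep a later load from floating above the store (a StoreLoad constraint), e.g. a Dekker-style handshake. A hedged sketch (the two flag variables are hypothetical):

    static volatile int _my_intent    = 0;   // hypothetical handshake flags
    static volatile int _other_intent = 0;

    bool try_enter() {
      // The trailing fence stops the following load from being reordered
      // ahead of the store; a plain release_store() would not.
      Atomic::release_store_fence(&_my_intent, 1);
      return Atomic::load(&_other_intent) == 0;
    }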
 681 template<typename I, typename D>
 682 inline D Atomic::add(I add_value, D volatile* dest,
 683                      atomic_memory_order order) {
 684   return AddImpl<I, D>()(add_value, dest, order);
 685 }
 686 
 687 template<typename I, typename D>
 688 struct Atomic::AddImpl<
 689   I, D,
 690   typename EnableIf<IsIntegral<I>::value &&
 691                     IsIntegral<D>::value &&
 692                     (sizeof(I) <= sizeof(D)) &&
 693                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 694 {
 695   D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
 696     D addend = add_value;
 697     return PlatformAdd<sizeof(D)>()(addend, dest, order);
 698   }

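The EnableIf guard above accepts only integral operand pairs whose signedness matches and whose addend is no wider than the destination; the addend is then converted to D before the call reaches PlatformAdd. Other combinations must match one of the remaining AddImpl specializations, which are not shown in this hunk. A few illustrative consequences, assuming the usual HotSpot typedefs:

    volatile size_t _x = 0;
    Atomic::add((size_t)1, &_x);   // OK: identical types
    Atomic::add((uint)1, &_x);     // OK on LP64: narrower addend, both unsigned
    // Atomic::add(1, &_x);        // rejected by this specialization: int is signed, size_t is not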
