src/hotspot/share/runtime/atomic.hpp

Old version:

 115   // scaled to the size of the type pointed to by the pointer.
 116   template<typename D>
 117   inline static void inc(D volatile* dest,
 118                          atomic_memory_order order = memory_order_conservative);
 119 
 120   // Atomically decrement a location. dec() provides:
 121   // <fence> decrement-dest <membar StoreLoad|StoreStore>
 122   // The type D may be either a pointer type, or an integral
 123   // type. If it is a pointer type, then the decrement is
 124   // scaled to the size of the type pointed to by the pointer.
 125   template<typename D>
 126   inline static void dec(D volatile* dest,
 127                          atomic_memory_order order = memory_order_conservative);
 128 
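// Illustrative usage sketch (editorial, not part of this file; the
// variables are hypothetical). For an integral location the delta is 1;
// for a pointer location it is scaled by the pointee size:
//
//   static volatile int counter = 0;
//   static int buffer[16];
//   static int* volatile cursor = buffer;
//
//   Atomic::inc(&counter);   // counter: 0 -> 1
//   Atomic::inc(&cursor);    // cursor advances by sizeof(int)
//   Atomic::dec(&cursor);    // cursor back to buffer
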
 129   // Performs atomic exchange of *dest with exchange_value. Returns the
 130   // prior value of *dest. xchg*() provide:
 131   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
 132   // The type T must be either a pointer type convertible to or equal
 133   // to D, an integral/enum type equal to D, or a type equal to D that
 134   // is primitive convertible using PrimitiveConversions.
 135   template<typename T, typename D>
 136   inline static D xchg(T exchange_value, volatile D* dest,
 137                        atomic_memory_order order = memory_order_conservative);
 138 
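// Usage sketch for this declaration (editorial; _state is hypothetical).
// Note the exchange_value-before-dest argument order of this version:
//
//   volatile int _state = 0;
//   int prev = Atomic::xchg(1, &_state);  // store 1, return prior value
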
 139   // Performs atomic compare of *dest and compare_value, and exchanges
 140   // *dest with exchange_value if the comparison succeeded. Returns the
 141   // prior value of *dest. cmpxchg*() provide:
 142   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
 143 
 144   template<typename T, typename D, typename U>
 145   inline static D cmpxchg(T exchange_value,
 146                           D volatile* dest,
 147                           U compare_value,
 148                           atomic_memory_order order = memory_order_conservative);
 149 
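// A typical retry loop built on cmpxchg (editorial sketch; update_max is
// a hypothetical helper). It atomically raises *dest to at least v:
//
//   static void update_max(volatile int* dest, int v) {
//     int cur = *dest;
//     while (cur < v) {
//       int prev = Atomic::cmpxchg(v, dest, cur);
//       if (prev == cur) break;  // exchange succeeded
//       cur = prev;              // lost a race; retry with the fresh value
//     }
//   }
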
 150   // Performs atomic compare of *dest and NULL, and replaces *dest
 151   // with exchange_value if the comparison succeeded.  Returns true if
 152   // the comparison succeeded and the exchange occurred.  This is
 153   // often used as part of lazy initialization, as a lock-free
 154   // alternative to the Double-Checked Locking Pattern.
 155   template<typename T, typename D>
 156   inline static bool replace_if_null(T* value, D* volatile* dest,

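// Lazy-initialization sketch using replace_if_null (editorial; Foo and
// _cached are hypothetical):
//
//   static Foo* volatile _cached = NULL;
//
//   Foo* get_foo() {
//     if (_cached == NULL) {
//       Foo* f = new Foo();
//       if (!Atomic::replace_if_null(f, &_cached)) {
//         delete f;              // another thread won the race
//       }
//     }
//     return _cached;
//   }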

 324   // helper invoked on the translated arguments, and the result
 325   // translated back.  Type is the parameter / return type of the
 326   // helper function.
 327   template<typename Type, typename Fn, typename T>
 328   static T cmpxchg_using_helper(Fn fn,
 329                                 T exchange_value,
 330                                 T volatile* dest,
 331                                 T compare_value);
 332 
 333   // Support platforms that do not provide Read-Modify-Write
 334   // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
 335   // this class.
 336 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 337   struct CmpxchgByteUsingInt;
 338 private:
 339 
 340   // Dispatch handler for xchg.  Provides type-based validity
 341   // checking and limited conversions around calls to the
 342   // platform-specific implementation layer provided by
 343   // PlatformXchg.
 344   template<typename T, typename D, typename Enable = void>
 345   struct XchgImpl;
 346 
 347   // Platform-specific implementation of xchg.  Support for sizes
 348   // of 4 and sizeof(intptr_t) is required.  The class is a function
 349   // object that must be default constructible, with these requirements:
 350   //
 351   // - dest is of type T*.
 352   // - exchange_value is of type T.
 353   // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
 354   //
 355   // Then
 356   //   platform_xchg(exchange_value, dest)
 357   // must be a valid expression, returning a result convertible to T.
 358   //
 359   // A default definition is provided, which declares a function template
 360   //   T operator()(T, T volatile*, atomic_memory_order) const
 361   //
 362   // For each required size, a platform must either provide an
 363   // appropriate definition of that function, or must entirely
 364   // specialize the class template for that size.
 365   template<size_t byte_size> struct PlatformXchg;
 366 
 367   // Support for platforms that implement some variants of xchg
 368   // using a (typically out of line) non-template helper function.
 369   // The generic arguments passed to PlatformXchg need to be
 370   // translated to the appropriate type for the helper function, the
 371   // helper invoked on the translated arguments, and the result
 372   // translated back.  Type is the parameter / return type of the
 373   // helper function.
 374   template<typename Type, typename Fn, typename T>
 375   static T xchg_using_helper(Fn fn,
 376                              T exchange_value,
 377                              T volatile* dest);
 378 };
 379 
 380 template<typename From, typename To>
 381 struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
 382   // Determine whether From* is implicitly convertible to To*, using
 383   // the "sizeof trick".
 384   typedef char yes;
 385   typedef char (&no)[2];
 386 
 387   static yes test(To*);
 388   static no test(...);
 389   static From* test_value;
 390 
 391   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 392 };
 393 
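// What the trick computes, illustratively (editorial, not part of the
// file):
//
//   Atomic::IsPointerConvertible<Derived*, Base*>::value  // true
//   Atomic::IsPointerConvertible<int*, long*>::value      // false
//
// Overload resolution picks test(To*) only when From* implicitly converts
// to To*; the distinct sizes of the overloads' return types then record,
// at compile time, which overload was chosen.
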
 394 // Handle load for pointer, integral and enum types.
 395 template<typename T, typename PlatformOp>
 396 struct Atomic::LoadImpl<
 397   T,


 576 
 577 // Define the class before including the platform file, which may use this
 578 // as a base class, requiring it be complete.  The definition is later
 579 // in this file, near the other definitions related to cmpxchg.
 580 struct Atomic::CmpxchgByteUsingInt {
 581   template<typename T>
 582   T operator()(T exchange_value,
 583                T volatile* dest,
 584                T compare_value,
 585                atomic_memory_order order) const;
 586 };
 587 
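// Opt-in sketch for a platform lacking byte-wide atomics (editorial; the
// real opt-ins live in the platform atomic headers):
//
//   template<>
//   struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
//
// The byte exchange is then emulated with a 4-byte cmpxchg on the aligned
// word containing the byte, as implemented later in this file.
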
 588 // Define the class before including the platform file, which may specialize
 589 // the operator definition.  No generic definition of specializations
 590 // of the operator template is provided, nor are there any generic
 591 // specializations of the class.  The platform file is responsible for
 592 // providing those.
 593 template<size_t byte_size>
 594 struct Atomic::PlatformXchg {
 595   template<typename T>
 596   T operator()(T exchange_value,
 597                T volatile* dest,
 598                atomic_memory_order order) const;
 599 };
 600 
 601 template <ScopedFenceType T>
 602 class ScopedFenceGeneral: public StackObj {
 603  public:
 604   void prefix() {}
 605   void postfix() {}
 606 };
 607 
 608 // The following methods can be specialized using simple template specialization
 609 // in the platform-specific files for optimization purposes. Otherwise the
 610 // generalized variant is used.
 611 
 612 template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
 613 template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
 614 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
 615 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
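// Usage pattern, sketched (editorial): ScopedFence, whose definition
// begins just below, calls prefix() in its constructor and postfix() in
// its destructor, bracketing an access with the requested ordering.
// _field and v are hypothetical:
//
//   {
//     ScopedFence<RELEASE_X> f((void*)&_field);  // prefix(): release()
//     _field = v;                                // the bracketed store
//   }                                            // postfix(): no-op here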
 616 
 617 template <ScopedFenceType T>


 874     uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
 875     if (res == cur) break;      // success
 876 
 877     // at least one byte in the int changed value, so update
 878     // our view of the current int
 879     cur = res;
 880     // if our byte still matches the compare value, loop and try again
 881   } while (cur_as_bytes[offset] == canon_compare_value);
 882 
 883   return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
 884 }
 885 
 886 // Handle xchg for integral and enum types.
 887 //
 888 // All the involved types must be identical.
 889 template<typename T>
 890 struct Atomic::XchgImpl<
 891   T, T,
 892   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 893 {
 894   T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
 895     // Forward to the platform handler for the size of T.
 896     return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
 897   }
 898 };
 899 
 900 // Handle xchg for pointer types.
 901 //
 902 // The exchange_value must be implicitly convertible to the
 903 // destination's type; it must be type-correct to store the
 904 // exchange_value in the destination.
 905 template<typename T, typename D>
 906 struct Atomic::XchgImpl<
 907   T*, D*,
 908   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 909 {
 910   D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
 911     // Allow derived to base conversion, and adding cv-qualifiers.
 912     D* new_value = exchange_value;
 913     return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
 914   }
 915 };
 916 
 917 // Handle xchg for types that have a translator.
 918 //
 919 // All the involved types must be identical.
 920 //
 921 // This translates the original call into a call on the decayed
 922 // arguments, and returns the recovered result of that translated
 923 // call.
 924 template<typename T>
 925 struct Atomic::XchgImpl<
 926   T, T,
 927   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 928 {
 929   T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
 930     typedef PrimitiveConversions::Translate<T> Translator;
 931     typedef typename Translator::Decayed Decayed;
 932     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 933     return Translator::recover(
 934       xchg(Translator::decay(exchange_value),
 935            reinterpret_cast<Decayed volatile*>(dest),
 936            order));
 937   }
 938 };
 939 
 940 template<typename Type, typename Fn, typename T>
 941 inline T Atomic::xchg_using_helper(Fn fn,
 942                                    T exchange_value,
 943                                    T volatile* dest) {
 944   STATIC_ASSERT(sizeof(Type) == sizeof(T));

 945   return PrimitiveConversions::cast<T>(
 946     fn(PrimitiveConversions::cast<Type>(exchange_value),
 947        reinterpret_cast<Type volatile*>(dest)));
 948 }
 949 
 950 template<typename T, typename D>
 951 inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
 952   return XchgImpl<T, D>()(exchange_value, dest, order);
 953 }
 954 
 955 #endif // SHARE_RUNTIME_ATOMIC_HPP

New version:

 115   // scaled to the size of the type pointed to by the pointer.
 116   template<typename D>
 117   inline static void inc(D volatile* dest,
 118                          atomic_memory_order order = memory_order_conservative);
 119 
 120   // Atomically decrement a location. dec() provides:
 121   // <fence> decrement-dest <membar StoreLoad|StoreStore>
 122   // The type D may be either a pointer type, or an integral
 123   // type. If it is a pointer type, then the decrement is
 124   // scaled to the size of the type pointed to by the pointer.
 125   template<typename D>
 126   inline static void dec(D volatile* dest,
 127                          atomic_memory_order order = memory_order_conservative);
 128 
 129   // Performs atomic exchange of *dest with exchange_value. Returns the
 130   // prior value of *dest. xchg*() provide:
 131   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
 132   // The type T must be either a pointer type convertible to or equal
 133   // to D, an integral/enum type equal to D, or a type equal to D that
 134   // is primitive convertible using PrimitiveConversions.
 135   template<typename D, typename T>
 136   inline static D xchg(volatile D* dest, T exchange_value,
 137                        atomic_memory_order order = memory_order_conservative);
 138 
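// Usage sketch for this declaration (editorial; _state is hypothetical).
// Note the dest-before-exchange_value argument order of this version:
//
//   volatile int _state = 0;
//   int prev = Atomic::xchg(&_state, 1);  // store 1, return prior value
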
 139   // Performs atomic compare of *dest and compare_value, and exchanges
 140   // *dest with exchange_value if the comparison succeeded. Returns the
 141   // prior value of *dest. cmpxchg*() provide:
 142   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
 143 
 144   template<typename T, typename D, typename U>
 145   inline static D cmpxchg(T exchange_value,
 146                           D volatile* dest,
 147                           U compare_value,
 148                           atomic_memory_order order = memory_order_conservative);
 149 
 150   // Performs atomic compare of *dest and NULL, and replaces *dest
 151   // with exchange_value if the comparison succeeded.  Returns true if
 152   // the comparison succeeded and the exchange occurred.  This is
 153   // often used as part of lazy initialization, as a lock-free
 154   // alternative to the Double-Checked Locking Pattern.
 155   template<typename T, typename D>
 156   inline static bool replace_if_null(T* value, D* volatile* dest,


 324   // helper invoked on the translated arguments, and the result
 325   // translated back.  Type is the parameter / return type of the
 326   // helper function.
 327   template<typename Type, typename Fn, typename T>
 328   static T cmpxchg_using_helper(Fn fn,
 329                                 T exchange_value,
 330                                 T volatile* dest,
 331                                 T compare_value);
 332 
 333   // Support platforms that do not provide Read-Modify-Write
 334   // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
 335   // this class.
 336 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 337   struct CmpxchgByteUsingInt;
 338 private:
 339 
 340   // Dispatch handler for xchg.  Provides type-based validity
 341   // checking and limited conversions around calls to the
 342   // platform-specific implementation layer provided by
 343   // PlatformXchg.
 344   template<typename D, typename T, typename Enable = void>
 345   struct XchgImpl;
 346 
 347   // Platform-specific implementation of xchg.  Support for sizes
 348   // of 4 and sizeof(intptr_t) is required.  The class is a function
 349   // object that must be default constructible, with these requirements:
 350   //
 351   // - dest is of type T*.
 352   // - exchange_value is of type T.
 353   // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
 354   //
 355   // Then
 356   //   platform_xchg(dest, exchange_value)
 357   // must be a valid expression, returning a result convertible to T.
 358   //
 359   // A default definition is provided, which declares a function template
 360   //   T operator()(T volatile*, T, atomic_memory_order) const
 361   //
 362   // For each required size, a platform must either provide an
 363   // appropriate definition of that function, or must entirely
 364   // specialize the class template for that size.
 365   template<size_t byte_size> struct PlatformXchg;
 366 
 367   // Support for platforms that implement some variants of xchg
 368   // using a (typically out of line) non-template helper function.
 369   // The generic arguments passed to PlatformXchg need to be
 370   // translated to the appropriate type for the helper function, the
 371   // helper invoked on the translated arguments, and the result
 372   // translated back.  Type is the parameter / return type of the
 373   // helper function.
 374   template<typename Type, typename Fn, typename T>
 375   static T xchg_using_helper(Fn fn,
 376                              T volatile* dest,
 377                              T exchange_value);
 378 };
 379 
 380 template<typename From, typename To>
 381 struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
 382   // Determine whether From* is implicitly convertible to To*, using
 383   // the "sizeof trick".
 384   typedef char yes;
 385   typedef char (&no)[2];
 386 
 387   static yes test(To*);
 388   static no test(...);
 389   static From* test_value;
 390 
 391   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 392 };
 393 
 394 // Handle load for pointer, integral and enum types.
 395 template<typename T, typename PlatformOp>
 396 struct Atomic::LoadImpl<
 397   T,


 576 
 577 // Define the class before including the platform file, which may use this
 578 // as a base class, requiring it be complete.  The definition is later
 579 // in this file, near the other definitions related to cmpxchg.
 580 struct Atomic::CmpxchgByteUsingInt {
 581   template<typename T>
 582   T operator()(T exchange_value,
 583                T volatile* dest,
 584                T compare_value,
 585                atomic_memory_order order) const;
 586 };
 587 
 588 // Define the class before including the platform file, which may specialize
 589 // the operator definition.  No generic definition of specializations
 590 // of the operator template is provided, nor are there any generic
 591 // specializations of the class.  The platform file is responsible for
 592 // providing those.
 593 template<size_t byte_size>
 594 struct Atomic::PlatformXchg {
 595   template<typename T>
 596   T operator()(T volatile* dest,
 597                T exchange_value,
 598                atomic_memory_order order) const;
 599 };
 600 
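// Specialization sketch for one required size (editorial; assumes a
// hypothetical GCC-based platform file and the __atomic_exchange_n
// builtin, conservatively mapping every order to a full barrier):
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
//                                                T exchange_value,
//                                                atomic_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
//   }
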
 601 template <ScopedFenceType T>
 602 class ScopedFenceGeneral: public StackObj {
 603  public:
 604   void prefix() {}
 605   void postfix() {}
 606 };
 607 
 608 // The following methods can be specialized using simple template specialization
 609 // in the platform-specific files for optimization purposes. Otherwise the
 610 // generalized variant is used.
 611 
 612 template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
 613 template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
 614 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
 615 template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
 616 
 617 template <ScopedFenceType T>


 874     uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
 875     if (res == cur) break;      // success
 876 
 877     // at least one byte in the int changed value, so update
 878     // our view of the current int
 879     cur = res;
 880     // if our byte still matches the compare value, loop and try again
 881   } while (cur_as_bytes[offset] == canon_compare_value);
 882 
 883   return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
 884 }
 885 
 886 // Handle xchg for integral and enum types.
 887 //
 888 // All the involved types must be identical.
 889 template<typename T>
 890 struct Atomic::XchgImpl<
 891   T, T,
 892   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 893 {
 894   T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
 895     // Forward to the platform handler for the size of T.
 896     return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
 897   }
 898 };
 899 
 900 // Handle xchg for pointer types.
 901 //
 902 // The exchange_value must be implicitly convertible to the
 903 // destination's type; it must be type-correct to store the
 904 // exchange_value in the destination.
 905 template<typename D, typename T>
 906 struct Atomic::XchgImpl<
 907   D*, T*,
 908   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 909 {
 910   D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
 911     // Allow derived to base conversion, and adding cv-qualifiers.
 912     D* new_value = exchange_value;
 913     return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
 914   }
 915 };
 916 
 917 // Handle xchg for types that have a translator.
 918 //
 919 // All the involved types must be identical.
 920 //
 921 // This translates the original call into a call on the decayed
 922 // arguments, and returns the recovered result of that translated
 923 // call.
 924 template<typename T>
 925 struct Atomic::XchgImpl<
 926   T, T,
 927   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 928 {
 929   T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
 930     typedef PrimitiveConversions::Translate<T> Translator;
 931     typedef typename Translator::Decayed Decayed;
 932     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 933     return Translator::recover(
 934       xchg(reinterpret_cast<Decayed volatile*>(dest),
 935            Translator::decay(exchange_value),
 936            order));
 937   }
 938 };
 939 
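// Translator sketch (editorial; the real registrations live in
// primitiveConversions.hpp). A type such as double participates by
// mapping to and from a same-sized integral Decayed type:
//
//   template<>
//   struct PrimitiveConversions::Translate<double> : public TrueType {
//     typedef double  Value;
//     typedef int64_t Decayed;
//     static Decayed decay(Value x)   { return PrimitiveConversions::cast<Decayed>(x); }
//     static Value recover(Decayed x) { return PrimitiveConversions::cast<Value>(x); }
//   };
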
 940 template<typename Type, typename Fn, typename T>
 941 inline T Atomic::xchg_using_helper(Fn fn,
 942                                    T volatile* dest,
 943                                    T exchange_value) {
 944   STATIC_ASSERT(sizeof(Type) == sizeof(T));
 945   // Notice the swapped order of arguments. Change when/if stubs are rewritten.
 946   return PrimitiveConversions::cast<T>(
 947     fn(PrimitiveConversions::cast<Type>(exchange_value),
 948        reinterpret_cast<Type volatile*>(dest)));
 949 }
 950 
 951 template<typename D, typename T>
 952 inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
 953   return XchgImpl<D, T>()(dest, exchange_value, order);
 954 }
 955 
 956 #endif // SHARE_RUNTIME_ATOMIC_HPP