< prev index next >

src/hotspot/share/runtime/atomic.hpp

Print this page
rev 47321 : [mq]: Atomic_loadstore


  47  public:
  48   // Atomic operations on jlong types are not available on all 32-bit
  49   // platforms. If atomic ops on jlongs are defined here they must only
  50   // be used from code that verifies they are available at runtime and
  51   // can provide an alternative action if not - see supports_cx8() for
  52   // a means to test availability.
  53 
  54   // The memory operations that are mentioned with each of the atomic
  55   // function families come from src/share/vm/runtime/orderAccess.hpp,
  56   // e.g., <fence> is described in that file and is implemented by the
  57   // OrderAccess::fence() function. See that file for the gory details
  58   // on the Memory Access Ordering Model.
  59 
  60   // All of the atomic operations that imply a read-modify-write action
  61   // guarantee a two-way memory barrier across that operation. Historically
  62   // these semantics reflect the strength of atomic operations that are
  63   // provided on SPARC/X86. We assume that strength is necessary unless
  64   // we can prove that a weaker form is sufficiently safe.
  65 
  66   // Atomically store to a location
  67   inline static void store    (jbyte    store_value, jbyte*    dest);
  68   inline static void store    (jshort   store_value, jshort*   dest);
  69   inline static void store    (jint     store_value, jint*     dest);
  70   // See comment above about using jlong atomics on 32-bit platforms
  71   inline static void store    (jlong    store_value, jlong*    dest);
  72   inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  73   inline static void store_ptr(void*    store_value, void*     dest);
  74 
  75   inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  76   inline static void store    (jshort   store_value, volatile jshort*   dest);
  77   inline static void store    (jint     store_value, volatile jint*     dest);
  78   // See comment above about using jlong atomics on 32-bit platforms
  79   inline static void store    (jlong    store_value, volatile jlong*    dest);
  80   inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  81   inline static void store_ptr(void*    store_value, volatile void*     dest);
  82 





  83   // See comment above about using jlong atomics on 32-bit platforms
  84   inline static jlong load(const volatile jlong* src);



  85 
  86   // Atomically add to a location. Returns updated value. add*() provide:
  87   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  88 
  89   template<typename I, typename D>
  90   inline static D add(I add_value, D volatile* dest);
  91 
  // Atomically add add_value to *dest via the generic add() template.
  // Returns the updated value.
  92   inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  93     return add(add_value, dest);
  94   }
  95 
  // void* flavor: dest is viewed as a char* location, so the addition
  // is performed in bytes (no pointee-size scaling).
  96   inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
  97     return add(add_value, reinterpret_cast<char* volatile*>(dest));
  98   }
  99 
 100   // Atomically increment location. inc() provide:
 101   // <fence> increment-dest <membar StoreLoad|StoreStore>
 102   // The type D may be either a pointer type, or an integral
 103   // type. If it is a pointer type, then the increment is
 104   // scaled to the size of the type pointed to by the pointer.


 157     return cmpxchg(exchange_value, dest, compare_value, order);
 158   }
 159 
  // void* flavor of cmpxchg: forwards to the generic cmpxchg with dest
  // viewed as a void* volatile* location.  Ordering defaults to the
  // conservative (strongest) mode.
 160   inline static void* cmpxchg_ptr(void* exchange_value,
 161                                   volatile void* dest,
 162                                   void* compare_value,
 163                                   cmpxchg_memory_order order = memory_order_conservative) {
 164     return cmpxchg(exchange_value,
 165                    reinterpret_cast<void* volatile*>(dest),
 166                    compare_value,
 167                    order);
 168   }
 169 
 170 private:
 171   // Test whether From is implicitly convertible to To.
 172   // From and To must be pointer types.
 173   // Note: Provides the limited subset of C++11 std::is_convertible
 174   // that is needed here.
 175   template<typename From, typename To> struct IsPointerConvertible;
 176 



















































 177   // Dispatch handler for add.  Provides type-based validity checking
 178   // and limited conversions around calls to the platform-specific
 179   // implementation layer provided by PlatformAdd.
 180   template<typename I, typename D, typename Enable = void>
 181   struct AddImpl;
 182 
 183   // Platform-specific implementation of add.  Support for sizes of 4
 184   // bytes and (if different) pointer size bytes are required.  The
 185   // class is a function object that must be default constructable,
 186   // with these requirements:
 187   //
 188   // - dest is of type D*, an integral or pointer type.
 189   // - add_value is of type I, an integral type.
 190   // - sizeof(I) == sizeof(D).
 191   // - if D is an integral type, I == D.
 192   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
 193   //
 194   // Then
 195   //   platform_add(add_value, dest)
 196   // must be a valid expression, returning a result convertible to D.


 327   template<typename Type, typename Fn, typename T>
 328   static T xchg_using_helper(Fn fn,
 329                              T exchange_value,
 330                              T volatile* dest);
 331 };
 332 
 333 template<typename From, typename To>
 334 struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
 335   // Determine whether From* is implicitly convertible to To*, using
 336   // the "sizeof trick".
  // The two result types have different sizes, so sizeof(test(...))
  // reveals which overload the compiler selected.
 337   typedef char yes;
 338   typedef char (&no)[2];
 339 
  // test(To*) is viable only if From* implicitly converts to To*;
  // otherwise overload resolution falls back to test(...).
 340   static yes test(To*);
 341   static no test(...);
  // Used only inside sizeof (an unevaluated context), so neither
  // test_value nor the test functions ever need definitions.
 342   static From* test_value;
 343 
 344   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 345 };
 346 





























































































































 347 // Define FetchAndAdd and AddAndFetch helper classes before including
 348 // platform file, which may use these as base classes, requiring they
 349 // be complete.
 350 
// Helper base class for platform add implementations.  Derived names
// the platform class that inherits from this helper; operator() is
// defined elsewhere and presumably adapts a fetch-then-add style
// primitive to add()'s return-updated-value contract — confirm in the
// platform files.
 351 template<typename Derived>
 352 struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
 353   template<typename I, typename D>
 354   D operator()(I add_value, D volatile* dest) const;
 355 };
 356 
// Companion helper to FetchAndAdd; operator() is defined elsewhere
// and presumably wraps an add-then-fetch style primitive — confirm in
// the platform files.
 357 template<typename Derived>
 358 struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
 359   template<typename I, typename D>
 360   D operator()(I add_value, D volatile* dest) const;
 361 };
 362 
 363 template<typename D>
 364 inline void Atomic::inc(D volatile* dest) {
 365   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 366   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;


 406 // of the operator template are provided, nor are there any generic
 407 // specializations of the class.  The platform file is responsible for
 408 // providing those.
// Declaration only: as noted above, no generic definition of the
// operator template is provided; each platform supplies the
// definitions or specializations it supports.
 409 template<size_t byte_size>
 410 struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
 411   template<typename T>
 412   T operator()(T exchange_value,
 413                T volatile* dest) const;
 414 };
 415 
 416 // platform specific in-line definitions - must come before shared definitions
 417 
 418 #include OS_CPU_HEADER(atomic)
 419 
 420 // shared in-line definitions
 421 
 422 // size_t casts...
 423 #if (SIZE_MAX != UINTPTR_MAX)
 424 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 425 #endif










 426 
// Dispatch add through AddImpl, which validates the I/D combination
// and forwards to the platform implementation.
 427 template<typename I, typename D>
 428 inline D Atomic::add(I add_value, D volatile* dest) {
 429   return AddImpl<I, D>()(add_value, dest);
 430 }
 431 
// AddImpl for integral types: enabled (via SFINAE) only when I and D
// are both integral, I is no wider than D, and they agree in
// signedness, so the widening conversion below is value-preserving.
 432 template<typename I, typename D>
 433 struct Atomic::AddImpl<
 434   I, D,
 435   typename EnableIf<IsIntegral<I>::value &&
 436                     IsIntegral<D>::value &&
 437                     (sizeof(I) <= sizeof(D)) &&
 438                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 439   VALUE_OBJ_CLASS_SPEC
 440 {
 441   D operator()(I add_value, D volatile* dest) const {
    // Widen add_value to D first; PlatformAdd expects operands of the
    // destination's size (sizeof(I) == sizeof(D), per the comment above).
 442     D addend = add_value;
 443     return PlatformAdd<sizeof(D)>()(addend, dest);
 444   }
 445 };




  47  public:
  48   // Atomic operations on jlong types are not available on all 32-bit
  49   // platforms. If atomic ops on jlongs are defined here they must only
  50   // be used from code that verifies they are available at runtime and
  51   // can provide an alternative action if not - see supports_cx8() for
  52   // a means to test availability.
  53 
  54   // The memory operations that are mentioned with each of the atomic
  55   // function families come from src/share/vm/runtime/orderAccess.hpp,
  56   // e.g., <fence> is described in that file and is implemented by the
  57   // OrderAccess::fence() function. See that file for the gory details
  58   // on the Memory Access Ordering Model.
  59 
  60   // All of the atomic operations that imply a read-modify-write action
  61   // guarantee a two-way memory barrier across that operation. Historically
  62   // these semantics reflect the strength of atomic operations that are
  63   // provided on SPARC/X86. We assume that strength is necessary unless
  64   // we can prove that a weaker form is sufficiently safe.
  65 
  66   // Atomically store to a location



  67   // See comment above about using jlong atomics on 32-bit platforms
  68   // The type T must be either a pointer type convertible to or equal
  69   // to D, an integral/enum type equal to D, or a type equal to D that
  70   // is primitive convertible using PrimitiveConversions.
  71   template<typename T, typename D>
  72   inline static void store(T store_value, volatile D* dest);
  73 
  // Legacy pointer-sized store; forwards to the generic store()
  // template.
  74   inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
  75     Atomic::store(store_value, dest);
  76   }


  77 
  // void* flavor: dest is cast to void* volatile* so the generic
  // store() template can dispatch on the stored pointer type.
  78   inline static void store_ptr(void*    store_value, volatile void*     dest) {
  79     Atomic::store(store_value, reinterpret_cast<void* volatile*>(dest));
  80   }
  81 
  82   // Atomically load from a location
  83   // See comment above about using jlong atomics on 32-bit platforms
  84   // The type T must be either a pointer type, an integral/enum type,
  85   // or a type that is primitive convertible using PrimitiveConversions.
  86   template<typename T>
  87   inline static T load(const volatile T* dest);
  88 
  89   // Atomically add to a location. Returns updated value. add*() provide:
  90   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  91 
  92   template<typename I, typename D>
  93   inline static D add(I add_value, D volatile* dest);
  94 
  // Atomically add add_value to *dest via the generic add() template.
  // Returns the updated value.
  95   inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  96     return add(add_value, dest);
  97   }
  98 
  // void* flavor: dest is viewed as a char* location, so the addition
  // is performed in bytes (no pointee-size scaling).
  99   inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
 100     return add(add_value, reinterpret_cast<char* volatile*>(dest));
 101   }
 102 
 103   // Atomically increment location. inc() provide:
 104   // <fence> increment-dest <membar StoreLoad|StoreStore>
 105   // The type D may be either a pointer type, or an integral
 106   // type. If it is a pointer type, then the increment is
 107   // scaled to the size of the type pointed to by the pointer.


 160     return cmpxchg(exchange_value, dest, compare_value, order);
 161   }
 162 
  // void* flavor of cmpxchg: forwards to the generic cmpxchg with dest
  // viewed as a void* volatile* location.  Ordering defaults to the
  // conservative (strongest) mode.
 163   inline static void* cmpxchg_ptr(void* exchange_value,
 164                                   volatile void* dest,
 165                                   void* compare_value,
 166                                   cmpxchg_memory_order order = memory_order_conservative) {
 167     return cmpxchg(exchange_value,
 168                    reinterpret_cast<void* volatile*>(dest),
 169                    compare_value,
 170                    order);
 171   }
 172 
 173 private:
 174   // Test whether From is implicitly convertible to To.
 175   // From and To must be pointer types.
 176   // Note: Provides the limited subset of C++11 std::is_convertible
 177   // that is needed here.
 178   template<typename From, typename To> struct IsPointerConvertible;
 179 
 180   // Dispatch handler for store.  Provides type-based validity
 181   // checking and limited conversions around calls to the platform-
 182   // specific implementation layer provided by PlatformOp.
 183   template<typename T, typename D, typename PlatformOp, typename Enable = void>
 184   struct StoreImpl;
 185 
 186   // Platform-specific implementation of store.  Support for sizes
 187   // of 1, 2, 4, and (if different) pointer size bytes are required.
 188   // The class is a function object that must be default constructable,
 189   // with these requirements:
 190   //
 191   // either:
 192   // - dest is of type D*, an integral, enum or pointer type.
 193   // - new_value are of type T, an integral, enum or pointer type D or
 194   //   pointer type convertible to D.
 195   // or:
 196   // - T and D are the same and are primitive convertible using PrimitiveConversions
 197   // and either way:
 198   // - platform_store is an object of type PlatformStore<sizeof(T)>.
 199   //
 200   // Then
 201   //   platform_store(new_value, dest)
 202   // must be a valid expression.
 203   //
 204   // The default implementation is a volatile store. If a platform
 205   // requires more for e.g. 64 bit stores, a specialization is required
 206   template<size_t byte_size> struct PlatformStore;
 207 
 208   // Dispatch handler for load.  Provides type-based validity
 209   // checking and limited conversions around calls to the platform-
 210   // specific implementation layer provided by PlatformOp.
 211   template<typename T, typename PlatformOp, typename Enable = void>
 212   struct LoadImpl;
 213 
 214   // Platform-specific implementation of load. Support for sizes of
 215   // 1, 2, 4 bytes and (if different) pointer size bytes are required.
 216   // The class is a function object that must be default
 217   // constructable, with these requirements:
 218   //
 219   // - dest is of type T*, an integral, enum or pointer type, or
 220   //   T is convertible to a primitive type using PrimitiveConversions
 221   // - platform_load is an object of type PlatformLoad<sizeof(T)>.
 222   //
 223   // Then
 224   //   platform_load(src)
 225   // must be a valid expression, returning a result convertible to T.
 226   //
 227   // The default implementation is a volatile load. If a platform
 228   // requires more for e.g. 64 bit loads, a specialization is required
 229   template<size_t byte_size> struct PlatformLoad;
 230 
 231   // Dispatch handler for add.  Provides type-based validity checking
 232   // and limited conversions around calls to the platform-specific
 233   // implementation layer provided by PlatformAdd.
 234   template<typename I, typename D, typename Enable = void>
 235   struct AddImpl;
 236 
 237   // Platform-specific implementation of add.  Support for sizes of 4
 238   // bytes and (if different) pointer size bytes are required.  The
 239   // class is a function object that must be default constructable,
 240   // with these requirements:
 241   //
 242   // - dest is of type D*, an integral or pointer type.
 243   // - add_value is of type I, an integral type.
 244   // - sizeof(I) == sizeof(D).
 245   // - if D is an integral type, I == D.
 246   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
 247   //
 248   // Then
 249   //   platform_add(add_value, dest)
 250   // must be a valid expression, returning a result convertible to D.


 381   template<typename Type, typename Fn, typename T>
 382   static T xchg_using_helper(Fn fn,
 383                              T exchange_value,
 384                              T volatile* dest);
 385 };
 386 
 387 template<typename From, typename To>
 388 struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
 389   // Determine whether From* is implicitly convertible to To*, using
 390   // the "sizeof trick".
  // The two result types have different sizes, so sizeof(test(...))
  // reveals which overload the compiler selected.
 391   typedef char yes;
 392   typedef char (&no)[2];
 393 
  // test(To*) is viable only if From* implicitly converts to To*;
  // otherwise overload resolution falls back to test(...).
 394   static yes test(To*);
 395   static no test(...);
  // Used only inside sizeof (an unevaluated context), so neither
  // test_value nor the test functions ever need definitions.
 396   static From* test_value;
 397 
 398   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 399 };
 400 
 401 // Handle load for pointer, integral and enum types.
// Enabled via SFINAE only for those type categories; the value is
// passed through to the platform operation without conversion.
 402 template<typename T, typename PlatformOp>
 403 struct Atomic::LoadImpl<
 404   T,
 405   PlatformOp,
 406   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value || IsPointer<T>::value>::type>
 407   VALUE_OBJ_CLASS_SPEC
 408 {
 409   T operator()(T const volatile* dest) const {
 410     // Forward to the platform handler for the size of T.
 411     return PlatformOp()(dest);
 412   }
 413 };
 414 
 415 // Handle load for types that have a translator.
 416 //
 417 // All the involved types must be identical.
 418 //
 419 // This translates the original call into a call on the decayed
 420 // arguments, and returns the recovered result of that translated
 421 // call.
 422 template<typename T, typename PlatformOp>
 423 struct Atomic::LoadImpl<
 424   T,
 425   PlatformOp,
 426   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 427   VALUE_OBJ_CLASS_SPEC
 428 {
 429   T operator()(T const volatile* dest) const {
 430     typedef PrimitiveConversions::Translate<T> Translator;
 431     typedef typename Translator::Decayed Decayed;
    // Same size is required so the pointer reinterpretation below
    // accesses exactly the object's bytes.
 432     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 433     Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    // Convert the primitive result back to T.
 434     return Translator::recover(result);
 435   }
 436 };
 437 
 438 // Default implementation of atomic load if a specific platform
 439 // does not provide a specialization for a certain size class.
 440 // For increased safety, the default implementation only allows
 441 // load types that are pointer sized or smaller. If a platform still
 442 // supports wide atomics, then it has to use specialization
 443 // of Atomic::PlatformLoad for that wider size class.
 444 template<size_t byte_size>
 445 struct Atomic::PlatformLoad VALUE_OBJ_CLASS_SPEC {
 446   template<typename T>
 447   T operator()(T const volatile* dest) const {
 448     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    // Plain volatile read; platforms where this is not sufficient for
    // some size must specialize PlatformLoad (see comment above).
 449     return *dest;
 450   }
 451 };
 452 
 453 // Handle store for integral and enum types.
 454 //
 455 // All the involved types must be identical.
// Enabled via SFINAE only when T is integral or a registered enum.
 456 template<typename T, typename PlatformOp>
 457 struct Atomic::StoreImpl<
 458   T, T,
 459   PlatformOp,
 460   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 461   VALUE_OBJ_CLASS_SPEC
 462 {
 463   void operator()(T new_value, T volatile* dest) const {
 464     // Forward to the platform handler for the size of T.
 465     PlatformOp()(new_value, dest);
 466   }
 467 };
 468 
 469 // Handle store for pointer types.
 470 //
 471 // The new_value must be implicitly convertible to the
 472 // destination's type; it must be type-correct to store the
 473 // new_value in the destination.
// Enabled via SFINAE only when T* implicitly converts to D*
// (checked by IsPointerConvertible).
 474 template<typename T, typename D, typename PlatformOp>
 475 struct Atomic::StoreImpl<
 476   T*, D*,
 477   PlatformOp,
 478   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 479   VALUE_OBJ_CLASS_SPEC
 480 {
 481   void operator()(T* new_value, D* volatile* dest) const {
 482     // Allow derived to base conversion, and adding cv-qualifiers.
 483     D* value = new_value;
 484     PlatformOp()(value, dest);
 485   }
 486 };
 487 
 488 // Handle store for types that have a translator.
 489 //
 490 // All the involved types must be identical.
 491 //
 492 // This translates the original call into a call on the decayed
 493 // arguments.
 494 template<typename T, typename PlatformOp>
 495 struct Atomic::StoreImpl<
 496   T, T,
 497   PlatformOp,
 498   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 499   VALUE_OBJ_CLASS_SPEC
 500 {
 501   void operator()(T new_value, T volatile* dest) const {
 502     typedef PrimitiveConversions::Translate<T> Translator;
 503     typedef typename Translator::Decayed Decayed;
    // Same size is required so the pointer reinterpretation below
    // stores exactly the object's bytes.
 504     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
 505     PlatformOp()(Translator::decay(new_value),
 506                  reinterpret_cast<Decayed volatile*>(dest));
 507   }
 508 };
 509 
 510 // Default implementation of atomic store if a specific platform
 511 // does not provide a specialization for a certain size class.
 512 // For increased safety, the default implementation only allows
 513 // storing types that are pointer sized or smaller. If a platform still
 514 // supports wide atomics, then it has to use specialization
 515 // of Atomic::PlatformStore for that wider size class.
 516 template<size_t byte_size>
 517 struct Atomic::PlatformStore VALUE_OBJ_CLASS_SPEC {
 518   template<typename T>
 519   void operator()(T new_value,
 520                   T volatile* dest) const {
 521     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    // The const_cast strips volatile from the assignment result before
    // it is discarded, so discarding it does not imply an additional
    // volatile read of *dest (discarding a volatile lvalue counts as
    // an access).
 522     (void)const_cast<T&>(*dest = new_value);
 523   }
 524 };
 525 
 526 // Define FetchAndAdd and AddAndFetch helper classes before including
 527 // platform file, which may use these as base classes, requiring they
 528 // be complete.
 529 
// Helper base class for platform add implementations.  Derived names
// the platform class that inherits from this helper; operator() is
// defined elsewhere and presumably adapts a fetch-then-add style
// primitive to add()'s return-updated-value contract — confirm in the
// platform files.
 530 template<typename Derived>
 531 struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
 532   template<typename I, typename D>
 533   D operator()(I add_value, D volatile* dest) const;
 534 };
 535 
// Companion helper to FetchAndAdd; operator() is defined elsewhere
// and presumably wraps an add-then-fetch style primitive — confirm in
// the platform files.
 536 template<typename Derived>
 537 struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
 538   template<typename I, typename D>
 539   D operator()(I add_value, D volatile* dest) const;
 540 };
 541 
 542 template<typename D>
 543 inline void Atomic::inc(D volatile* dest) {
 544   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
 545   typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;


 585 // of the operator template are provided, nor are there any generic
 586 // specializations of the class.  The platform file is responsible for
 587 // providing those.
// Declaration only: as noted above, no generic definition of the
// operator template is provided; each platform supplies the
// definitions or specializations it supports.
 588 template<size_t byte_size>
 589 struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
 590   template<typename T>
 591   T operator()(T exchange_value,
 592                T volatile* dest) const;
 593 };
 594 
 595 // platform specific in-line definitions - must come before shared definitions
 596 
 597 #include OS_CPU_HEADER(atomic)
 598 
 599 // shared in-line definitions
 600 
 601 // size_t casts...
 602 #if (SIZE_MAX != UINTPTR_MAX)
 603 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 604 #endif
 605 
// Dispatch the load through LoadImpl, selecting the platform handler
// by sizeof(T).
 606 template<typename T>
 607 inline T Atomic::load(const volatile T* dest) {
 608   return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
 609 }
 610 
// Dispatch the store through StoreImpl, selecting the platform
// handler by the size of the destination type D.
 611 template<typename T, typename D>
 612 inline void Atomic::store(T store_value, volatile D* dest) {
 613   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
 614 }
 615 
// Dispatch add through AddImpl, which validates the I/D combination
// and forwards to the platform implementation.
 616 template<typename I, typename D>
 617 inline D Atomic::add(I add_value, D volatile* dest) {
 618   return AddImpl<I, D>()(add_value, dest);
 619 }
 620 
// AddImpl for integral types: enabled (via SFINAE) only when I and D
// are both integral, I is no wider than D, and they agree in
// signedness, so the widening conversion below is value-preserving.
 621 template<typename I, typename D>
 622 struct Atomic::AddImpl<
 623   I, D,
 624   typename EnableIf<IsIntegral<I>::value &&
 625                     IsIntegral<D>::value &&
 626                     (sizeof(I) <= sizeof(D)) &&
 627                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 628   VALUE_OBJ_CLASS_SPEC
 629 {
 630   D operator()(I add_value, D volatile* dest) const {
    // Widen add_value to D first; PlatformAdd expects operands of the
    // destination's size (sizeof(I) == sizeof(D), per the comment above).
 631     D addend = add_value;
 632     return PlatformAdd<sizeof(D)>()(addend, dest);
 633   }
 634 };


< prev index next >