< prev index next >

src/share/vm/runtime/atomic.hpp

Print this page
rev 13452 : imported patch Atomic_cmpxchg
rev 13453 : imported patch Atomic_add
rev 13454 : [mq]: Atomic_add_v2


 202   // P*, then let addend (of type I) be add_value * sizeof(P);
 203   // otherwise, addend is add_value.
 204   //
 205   // FetchAndAdd requires the derived class to provide
 206   //   fetch_and_add(addend, dest)
 207   // atomically adding addend to the value of dest, and returning the
 208   // old value.
 209   //
 210   // AddAndFetch requires the derived class to provide
 211   //   add_and_fetch(addend, dest)
 212   // atomically adding addend to the value of dest, and returning the
 213   // new value.
 214   //
 215   // When D is a pointer type P*, both fetch_and_add and add_and_fetch
 216   // treat it as if it were a uintptr_t; they do not perform any
 217   // scaling of the addend, as that has already been done by the
 218   // caller.
 219 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 220   template<typename Derived> struct FetchAndAdd;
 221   template<typename Derived> struct AddAndFetch;

 222 private:
 223 
 224   // Support for platforms that implement some variants of add using a
 225   // (typically out of line) non-template helper function.  The
 226   // generic arguments passed to PlatformAdd need to be translated to
 227   // the appropriate type for the helper function, the helper function
 228   // invoked on the translated arguments, and the result translated
 229   // back.  Type is the parameter / return type of the helper
 230   // function.  No scaling of add_value is performed when D is a pointer
 231   // type, so this function can be used to implement the support function
 232   // required by AddAndFetch.
 233   template<typename Type, typename Fn, typename I, typename D>
 234   static D add_using_helper(Fn fn, I add_value, D volatile* dest);
 235 
 236   // Dispatch handler for cmpxchg.  Provides type-based validity
 237   // checking and limited conversions around calls to the
 238   // platform-specific implementation layer provided by
 239   // PlatformCmpxchg.
 240   template<typename T, typename D, typename U, typename Enable = void>
 241   struct CmpxchgImpl;


 295 
 296   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 297 };
 298 
 299 // Define FetchAndAdd and AddAndFetch helper classes before including
 300 // platform file, which may use these as base classes, requiring they
 301 // be complete.
 302 
// CRTP helper for platforms whose add primitive returns the OLD value.
// Derived must provide fetch_and_add(addend, dest); this wrapper scales
// pointer addends and converts the result to new-value semantics (see
// the operator() definition later in this file).
template<typename Derived>
struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
  // Returns the post-add value of *dest, i.e. old value + add_value.
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
 308 
// CRTP helper for platforms whose add primitive returns the NEW value.
// Derived must provide add_and_fetch(addend, dest); this wrapper scales
// pointer addends before delegating.
template<typename Derived>
struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
  // Returns the post-add value of *dest.
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
 314 














 315 // Define the class before including platform file, which may specialize
 316 // the operator definition.  No generic definition of specializations
 317 // of the operator template are provided, nor are there any generic
 318 // specializations of the class.  The platform file is responsible for
 319 // providing those.
// Size-dispatched platform layer for cmpxchg.  Only explicit
// specializations (provided by the platform file) are ever used; no
// generic definition of operator() exists.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
  // Compare-and-exchange on *dest with the given memory order.
  // NOTE(review): by cmpxchg convention this returns the value
  // previously in *dest — confirm against the platform specializations.
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};
 328 
 329 // Define the class before including platform file, which may use this
 330 // as a base class, requiring it be complete.  The definition is later
 331 // in this file, near the other definitions related to cmpxchg.
 332 struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
 333   template<typename T>
 334   T operator()(T exchange_value,


 368   }
 369 };
 370 
 371 template<typename I, typename P>
 372 struct Atomic::AddImpl<
 373   I, P*,
 374   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 375   VALUE_OBJ_CLASS_SPEC
 376 {
 377   P* operator()(I add_value, P* volatile* dest) const {
 378     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
 379     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
 380     typedef typename Conditional<IsSigned<I>::value,
 381                                  intptr_t,
 382                                  uintptr_t>::type CI;
 383     CI addend = add_value;
 384     return PlatformAdd<sizeof(P*)>()(addend, dest);
 385   }
 386 };
 387 
 388 // Most platforms do not support atomic add on a 2-byte value. However,
 389 // if the value occupies the most significant 16 bits of an aligned 32-bit
 390 // word, then we can do this with an atomic add of (add_value << 16)
 391 // to the 32-bit word.
 392 //
 393 // The least significant parts of this 32-bit word will never be affected, even
 394 // in case of overflow/underflow.
 395 //
 396 // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 397 template<>
 398 struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
 399   jshort operator()(jshort add_value, jshort volatile* dest) const {
 400 #ifdef VM_LITTLE_ENDIAN
 401     assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
 402     jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
 403 #else
 404     assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
 405     jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
 406 #endif
 407     return (jshort)(new_value >> 16); // preserves sign
 408   }
 409 };
 410 
 411 template<typename Derived>
 412 template<typename I, typename D>
 413 inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
 414   I addend = add_value;
 415   // If D is a pointer type P*, scale by sizeof(P).
 416   if (IsPointer<D>::value) {
 417     addend *= sizeof(typename RemovePointer<D>::type);
 418   }
 419   D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
 420   return old + add_value;
 421 }
 422 
 423 template<typename Derived>
 424 template<typename I, typename D>
 425 inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
 426   // If D is a pointer type P*, scale by sizeof(P).
 427   if (IsPointer<D>::value) {
 428     add_value *= sizeof(typename RemovePointer<D>::type);




 202   // P*, then let addend (of type I) be add_value * sizeof(P);
 203   // otherwise, addend is add_value.
 204   //
 205   // FetchAndAdd requires the derived class to provide
 206   //   fetch_and_add(addend, dest)
 207   // atomically adding addend to the value of dest, and returning the
 208   // old value.
 209   //
 210   // AddAndFetch requires the derived class to provide
 211   //   add_and_fetch(addend, dest)
 212   // atomically adding addend to the value of dest, and returning the
 213   // new value.
 214   //
 215   // When D is a pointer type P*, both fetch_and_add and add_and_fetch
 216   // treat it as if it were a uintptr_t; they do not perform any
 217   // scaling of the addend, as that has already been done by the
 218   // caller.
 219 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
 220   template<typename Derived> struct FetchAndAdd;
 221   template<typename Derived> struct AddAndFetch;
 222   struct AddShortUsingInt;
 223 private:
 224 
 225   // Support for platforms that implement some variants of add using a
 226   // (typically out of line) non-template helper function.  The
 227   // generic arguments passed to PlatformAdd need to be translated to
 228   // the appropriate type for the helper function, the helper function
 229   // invoked on the translated arguments, and the result translated
 230   // back.  Type is the parameter / return type of the helper
 231   // function.  No scaling of add_value is performed when D is a pointer
 232   // type, so this function can be used to implement the support function
 233   // required by AddAndFetch.
 234   template<typename Type, typename Fn, typename I, typename D>
 235   static D add_using_helper(Fn fn, I add_value, D volatile* dest);
 236 
 237   // Dispatch handler for cmpxchg.  Provides type-based validity
 238   // checking and limited conversions around calls to the
 239   // platform-specific implementation layer provided by
 240   // PlatformCmpxchg.
 241   template<typename T, typename D, typename U, typename Enable = void>
 242   struct CmpxchgImpl;


 296 
 297   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 298 };
 299 
 300 // Define FetchAndAdd and AddAndFetch helper classes before including
 301 // platform file, which may use these as base classes, requiring they
 302 // be complete.
 303 
// CRTP helper for platforms whose add primitive returns the OLD value.
// Derived must provide fetch_and_add(addend, dest); this wrapper scales
// pointer addends and converts the result to new-value semantics (see
// the operator() definition later in this file).
template<typename Derived>
struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
  // Returns the post-add value of *dest, i.e. old value + add_value.
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
 309 
// CRTP helper for platforms whose add primitive returns the NEW value.
// Derived must provide add_and_fetch(addend, dest); this wrapper scales
// pointer addends before delegating.
template<typename Derived>
struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
  // Returns the post-add value of *dest.
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
 315 
 316 // Most platforms do not support atomic add on a 2-byte value. However,
 317 // if the value occupies the most significant 16 bits of an aligned 32-bit
 318 // word, then we can do this with an atomic add of (add_value << 16)
 319 // to the 32-bit word.
 320 //
 321 // The least significant parts of this 32-bit word will never be affected, even
 322 // in case of overflow/underflow.
 323 //
 324 // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
// Helper for platforms without a native 2-byte atomic add: implements it
// via a 4-byte atomic add on the enclosing aligned 32-bit word, with the
// short occupying that word's most significant half (see comment above).
struct Atomic::AddShortUsingInt VALUE_OBJ_CLASS_SPEC {
  // T is the 2-byte operand type; returns the post-add value of *dest.
  template<typename T>
  T operator()(T add_value, T volatile* dest) const;
};
 329 
 330 // Define the class before including platform file, which may specialize
 331 // the operator definition.  No generic definition of specializations
 332 // of the operator template are provided, nor are there any generic
 333 // specializations of the class.  The platform file is responsible for
 334 // providing those.
// Size-dispatched platform layer for cmpxchg.  Only explicit
// specializations (provided by the platform file) are ever used; no
// generic definition of operator() exists.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
  // Compare-and-exchange on *dest with the given memory order.
  // NOTE(review): by cmpxchg convention this returns the value
  // previously in *dest — confirm against the platform specializations.
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};
 343 
 344 // Define the class before including platform file, which may use this
 345 // as a base class, requiring it be complete.  The definition is later
 346 // in this file, near the other definitions related to cmpxchg.
 347 struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
 348   template<typename T>
 349   T operator()(T exchange_value,


 383   }
 384 };
 385 
 386 template<typename I, typename P>
 387 struct Atomic::AddImpl<
 388   I, P*,
 389   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 390   VALUE_OBJ_CLASS_SPEC
 391 {
 392   P* operator()(I add_value, P* volatile* dest) const {
 393     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
 394     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
 395     typedef typename Conditional<IsSigned<I>::value,
 396                                  intptr_t,
 397                                  uintptr_t>::type CI;
 398     CI addend = add_value;
 399     return PlatformAdd<sizeof(P*)>()(addend, dest);
 400   }
 401 };
 402 
 403 template<typename T>
 404 T Atomic::AddShortUsingInt::operator()(T add_value, T volatile* dest) const {










 405 #ifdef VM_LITTLE_ENDIAN
 406   assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
 407   uint32_t new_value = Atomic::add(uint32_t(add_value) << 16, (volatile uint32_t*)(dest-1));
 408 #else
 409   assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
 410   uint32_t new_value = Atomic::add(uint32_t(add_value) << 16, (volatile uint32_t*)(dest));
 411 #endif
 412   return T(new_value >> 16); // preserves sign

 413 };
 414 
 415 template<typename Derived>
 416 template<typename I, typename D>
 417 inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
 418   I addend = add_value;
 419   // If D is a pointer type P*, scale by sizeof(P).
 420   if (IsPointer<D>::value) {
 421     addend *= sizeof(typename RemovePointer<D>::type);
 422   }
 423   D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
 424   return old + add_value;
 425 }
 426 
 427 template<typename Derived>
 428 template<typename I, typename D>
 429 inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
 430   // If D is a pointer type P*, scale by sizeof(P).
 431   if (IsPointer<D>::value) {
 432     add_value *= sizeof(typename RemovePointer<D>::type);


< prev index next >