/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that doesn't clash with the C++11 std::memory_order
  // enumerators. We need to be more conservative than they allow.
  memory_order_conservative = 8
};
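
// Illustrative sketch (not code from this file; _count is a
// hypothetical field): an update that does not need to synchronize
// with surrounding memory accesses can request relaxed semantics
// explicitly, while omitting the argument selects the stronger
// conservative ordering.
//
//   jint old = Atomic::cmpxchg(new_value, &_count, expected,
//                              memory_order_relaxed);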

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
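  //
  // For example (a hedged sketch, not code from this file; the field
  // and lock names are hypothetical), a 32-bit caller would guard a
  // 64-bit store on the runtime check:
  //
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::store(value, &_jlong_field);  // lock-free 64-bit store
  //   } else {
  //     MutexLocker ml(SomeField_lock);       // fall back to locking
  //     _jlong_field = value;
  //   }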

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  inline static void store    (jbyte    store_value, jbyte*    dest);
  inline static void store    (jshort   store_value, jshort*   dest);
  inline static void store    (jint     store_value, jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, jlong*    dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void*    store_value, void*     dest);

  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  inline static void store    (jshort   store_value, volatile jshort*   dest);
  inline static void store    (jint     store_value, volatile jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, volatile jlong*    dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void*    store_value, volatile void*     dest);

  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong load(const volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest);

  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
    return add(add_value, dest);
  }

  inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
    return add(add_value, reinterpret_cast<char* volatile*>(dest));
  }
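
  // Usage sketch (illustrative only; _allocations is a hypothetical
  // field): bump a counter and observe the updated value in one atomic
  // step.
  //
  //   volatile jint _allocations;
  //   jint now = Atomic::add(1, &_allocations);  // returns old value + 1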

  // Atomically increment location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  inline static void inc    (volatile jint*     dest);
  inline static void inc    (volatile jshort*   dest);
  inline static void inc    (volatile size_t*   dest);
  inline static void inc_ptr(volatile intptr_t* dest);
  inline static void inc_ptr(volatile void*     dest);

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  inline static void dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);
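
  // Usage sketch (illustrative only; _bits and mask are hypothetical):
  // the standard CAS retry loop for a read-modify-write operation that
  // has no dedicated primitive.
  //
  //   jint old;
  //   do {
  //     old = _bits;
  //   } while (Atomic::cmpxchg(old | mask, &_bits, old) != old);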

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded.  Returns true if
  // the comparison succeeded and the exchange occurred.  This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     cmpxchg_memory_order order = memory_order_conservative);
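
  // Usage sketch (illustrative only; Foo and _foo are hypothetical):
  //
  //   if (_foo == NULL) {
  //     Foo* tmp = new Foo();
  //     if (!Atomic::replace_if_null(tmp, &_foo)) {
  //       delete tmp;   // another thread installed its copy first
  //     }
  //   }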

  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
                                     volatile intptr_t* dest,
                                     intptr_t compare_value,
                                     cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void* cmpxchg_ptr(void* exchange_value,
                                  volatile void* dest,
                                  void* compare_value,
                                  cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value,
                   reinterpret_cast<void* volatile*>(dest),
                   compare_value,
                   order);
  }

private:
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

  // Dispatch handler for add.  Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename I, typename D, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add.  Support for sizes of 4
  // bytes and (if different) pointer-size bytes is required.  The
  // class is a function object that must be default constructible,
  // with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then
  //   platform_add(add_value, dest)
  // must be a valid expression, returning a result convertible to D.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;

  // Helper base classes for defining PlatformAdd.  To use, define
  // PlatformAdd or a specialization that derives from one of these,
  // and include in the PlatformAdd definition the support function
  // (described below) required by the base class.
  //
  // These classes implement the required function object protocol for
  // PlatformAdd, using a support function template provided by the
  // derived class.  Let add_value (of type I) and dest (of type D) be
  // the arguments the object is called with.  If D is a pointer type
  // P*, then let addend (of type I) be add_value * sizeof(P);
  // otherwise, addend is add_value.
  //
  // FetchAndAdd requires the derived class to provide
  //   fetch_and_add(addend, dest)
  // atomically adding addend to the value of dest, and returning the
  // old value.
  //
  // AddAndFetch requires the derived class to provide
  //   add_and_fetch(addend, dest)
  // atomically adding addend to the value of dest, and returning the
  // new value.
  //
  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
  // treat it as if it were a uintptr_t; they do not perform any
  // scaling of the addend, as that has already been done by the
  // caller.
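  //
  // For example, a platform port might define (an illustrative sketch
  // using the gcc __sync_add_and_fetch builtin; real ports often use
  // inline assembly instead):
  //
  //   template<>
  //   struct Atomic::PlatformAdd<4>
  //     : Atomic::AddAndFetch<Atomic::PlatformAdd<4> >
  //   {
  //     template<typename I, typename D>
  //     D add_and_fetch(I add_value, D volatile* dest) const {
  //       return __sync_add_and_fetch(dest, add_value);
  //     }
  //   };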
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  template<typename Derived> struct FetchAndAdd;
  template<typename Derived> struct AddAndFetch;
  struct AddShortUsingInt;
private:

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function.  The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back.  Type is the parameter / return type of the helper
  // function.  No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename I, typename D>
  static D add_using_helper(Fn fn, I add_value, D volatile* dest);

  // Dispatch handler for cmpxchg.  Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg.  Support for sizes
  // of 1, 4, and 8 bytes is required.  The class is a function object
  // that must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type cmpxchg_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
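  //
  // For example, a platform port might provide (an illustrative sketch
  // using the gcc __sync_val_compare_and_swap builtin; real ports often
  // use inline assembly instead):
  //
  //   template<>
  //   template<typename T>
  //   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
  //                                                   T volatile* dest,
  //                                                   T compare_value,
  //                                                   cmpxchg_memory_order) const {
  //     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  //   }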
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back.  Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T exchange_value,
                                T volatile* dest,
                                T compare_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};
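
// For instance, given class B and class D derived from B,
// IsPointerConvertible<D*, B*>::value is true (overload resolution picks
// test(To*), whose return type has size 1), while
// IsPointerConvertible<B*, D*>::value is false (only test(...) matches).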

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};

template<typename Derived>
struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
struct Atomic::AddShortUsingInt VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T add_value, T volatile* dest) const;
};
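
// For example (an illustrative sketch; the field names are
// hypothetical), a class declares the 2-byte field together with its
// padding partner via the macro, so the pair fills an aligned 32-bit
// word with the atomic half in the most significant position:
//
//   ATOMIC_SHORT_PAIR(
//     volatile jshort _refcount,  // updated with Atomic::add/inc/dec
//     jshort _length              // never updated atomically
//   );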

// Define the class before including platform file, which may specialize
// the operator definition.  No generic definitions of specializations
// of the operator template are provided, nor are there any generic
// specializations of the class.  The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete.  The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest) {
  return AddImpl<I, D>()(add_value, dest);
}

template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D operator()(I add_value, D volatile* dest) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest);
  }
};

template<typename I, typename P>
struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
  VALUE_OBJ_CLASS_SPEC
{
  P* operator()(I add_value, P* volatile* dest) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest);
  }
};

template<typename T>
T Atomic::AddShortUsingInt::operator()(T add_value, T volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN
  // On little-endian machines the most significant 16 bits of the
  // 32-bit word are at the higher address, so back dest up by one
  // jshort to reach the word's base address.
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  uint32_t new_value = Atomic::add(uint32_t(add_value) << 16, (volatile uint32_t*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  uint32_t new_value = Atomic::add(uint32_t(add_value) << 16, (volatile uint32_t*)(dest));
#endif
  return T(new_value >> 16); // preserves sign
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
}

template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
                                        dest,
                                        compare_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D, typename U>
struct Atomic::CmpxchgImpl<
  T*, D*, U*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                cmpxchg_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(Translator::decay(exchange_value),
              reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              order));
  }
};
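
// For example, a translator that decays jdouble to a same-sized
// integral type (see primitiveConversions.hpp) is what lets callers
// cmpxchg a jdouble field even though the hardware primitive operates
// on integer bit patterns.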

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T exchange_value,
                                      T volatile* dest,
                                      T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // The current value may not be what we are looking for, so force it
  // to the compare_value so the initial cmpxchg will fail if the
  // actual byte value is different.
  cur_as_bytes[offset] = canon_compare_value;

  // Always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure.
  do {
    // The value to swap in matches the current value ...
    uint32_t new_value = cur;
    // ... except for the one byte we want to update.
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // At least one byte in the int changed value, so update
    // our view of the current int.
    cur = res;
    // If our byte still equals compare_value we loop and try again.
  } while (cur_as_bytes[offset] == canon_compare_value);

  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void Atomic::inc(volatile jshort* dest) {
  (void)add(jshort(1), dest);
}

inline void Atomic::dec(volatile jshort* dest) {
  (void)add(jshort(-1), dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP