
src/share/vm/runtime/atomic.hpp

rev 13323 : imported patch Atomic_refactoring
rev 13325 : imported patch Atomic_polishing_v2
rev 13327 : [mq]: SpecializableAtomic

@@ -24,23 +24,113 @@
 
 #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
+#include "metaprogramming/integerTypes.hpp"
+#include "metaprogramming/isDerived.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "utilities/align.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
 enum cmpxchg_memory_order {
   memory_order_relaxed,
   // Use value which doesn't interfere with C++2011. We need to be more conservative.
   memory_order_conservative = 8
 };
 
+class GeneralizedAtomic : AllStatic {
+  template<typename T> class Never: public FalseType {};
+
+  template <typename T>
+  inline static void specialized_store(T store_value, volatile T* dest) {
+    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
+    (void)const_cast<T&>(*dest = store_value);
+  }
+
+  template <typename T>
+  inline static T specialized_load(const volatile T* dest) {
+    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
+    return *dest;
+  }
+
+  template <typename T>
+  inline static T specialized_add(T add_value, volatile T* dest) {
+    STATIC_ASSERT(Never<T>::value);
+    return add_value;
+  }
+
+  template <typename T>
+  inline static void specialized_inc(volatile T* dest) {
+    add(1, dest);
+  }
+
+  template <typename T>
+  inline static void specialized_dec(volatile T* dest) {
+    add(-1, dest);
+  }
+
+  template <typename T>
+  inline static T specialized_xchg(T exchange_value, volatile T* dest) {
+    STATIC_ASSERT(Never<T>::value);
+    return exchange_value;
+  }
+
+  template <typename T>
+  inline static T specialized_cmpxchg(T exchange_value, volatile T* dest, T compare_value, cmpxchg_memory_order order) {
+    STATIC_ASSERT(Never<T>::value);
+    return exchange_value;
+  }
+
+public:
+  template <typename T, typename U>
+  inline static void store(T store_value, volatile U* dest);
+
+  template <typename T>
+  inline static T load(volatile T* src);
+
+  template <typename T, typename U>
+  inline static U add(T add_value, volatile U* dst);
+
+  template <typename T, typename U>
+  inline static U* add(T add_value, U* volatile* dst);
+
+  template <typename T>
+  inline static void inc(volatile T* dest);
+
+  template <typename T>
+  inline static void inc(T* volatile* dest);
+
+  template <typename T>
+  inline static void dec(volatile T* dest);
+
+  template <typename T>
+  inline static void dec(T* volatile* dest);
+
+  template <typename T, typename U>
+  inline static U xchg(T exchange_value, volatile U* dest);
+
+  template <typename T, typename U, typename V>
+  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order);
+};
+
+
+// platform specific in-line definitions - must come before shared definitions
+
+class PlatformAtomic;
+
+#include OS_CPU_HEADER(atomic)
+
+typedef Conditional<IsDerived<PlatformAtomic, AllStatic>::value, PlatformAtomic, GeneralizedAtomic>::type AtomicImpl;
+
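As an illustration of the dispatch above: a port opts in by defining PlatformAtomic (derived from AllStatic) in its OS_CPU atomic header, and the AtomicImpl typedef then selects it instead of GeneralizedAtomic. A minimal sketch with a hypothetical platform header and only cmpxchg shown, delegating back to GeneralizedAtomic where a real port would use its own instruction sequence or intrinsic:

// Sketch only, not part of this patch; hypothetical OS_CPU atomic header.
class PlatformAtomic : public AllStatic {
 public:
  // A real PlatformAtomic must provide the full interface Atomic forwards to
  // (store, load, add, inc, dec, xchg, cmpxchg); only cmpxchg is sketched here.
  template <typename T, typename U, typename V>
  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
    // Placeholder body: reuse the generalized implementation.
    return GeneralizedAtomic::cmpxchg(exchange_value, dest, compare_value, order);
  }
};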
 class Atomic : AllStatic {
  public:
-  // Atomic operations on jlong types are not available on all 32-bit
-  // platforms. If atomic ops on jlongs are defined here they must only
+  // Atomic operations on 64-bit types are not available on all 32-bit
+  // platforms. If atomic ops on 64-bit types are defined here they must only
   // be used from code that verifies they are available at runtime and
   // can provide an alternative action if not - see supports_cx8() for
   // a means to test availability.
 
   // The memory operations that are mentioned with each of the atomic

@@ -54,153 +144,283 @@
   // these semantics reflect the strength of atomic operations that are
   // provided on SPARC/X86. We assume that strength is necessary unless
   // we can prove that a weaker form is sufficiently safe.
 
   // Atomically store to a location
-  inline static void store    (jbyte    store_value, jbyte*    dest);
-  inline static void store    (jshort   store_value, jshort*   dest);
-  inline static void store    (jint     store_value, jint*     dest);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static void store    (jlong    store_value, jlong*    dest);
-  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
-  inline static void store_ptr(void*    store_value, void*     dest);
-
-  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
-  inline static void store    (jshort   store_value, volatile jshort*   dest);
-  inline static void store    (jint     store_value, volatile jint*     dest);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static void store    (jlong    store_value, volatile jlong*    dest);
-  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
-  inline static void store_ptr(void*    store_value, volatile void*     dest);
-
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong load(const volatile jlong* src);
+  // See comment above about using 64-bit atomics on 32-bit platforms
+  template <typename T, typename U>
+  inline static void store(T store_value, volatile U* dest);
+
+  // The store_ptr() member functions are deprecated. Use store() instead.
+  static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
+    store(store_value, dest);
+  }
+
+  static void store_ptr(void*    store_value, volatile void*     dest) {
+    store((intptr_t)store_value, (volatile intptr_t*)dest);
+  }
+
+  // Atomically load from a location
+  // See comment above about using 64-bit atomics on 32-bit platforms
+  template <typename T>
+  inline static T load(volatile T* src);
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
-  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
-  inline static jint     add    (jint     add_value, volatile jint*     dest);
-  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
-  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
-  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
+  // add(I1 v, I* d)
+  // add(I1 v, P* d)
+  // where I, I1 are integral types, P is a pointer type.
+  // Functional behavior is modelled on *dest += add_value.
+  template <typename T, typename U>
+  inline static U add(T add_value, volatile U* dst);
+
+  // The add_ptr() member functions are deprecated. Use add() instead.
+  static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+    return add(add_value, dest);
+  }
+
+  static void*    add_ptr(intptr_t add_value, volatile void*     dest) {
+    return (void*)add(add_value, (volatile intptr_t*)dest);
+  }
 
   // Atomically increment location. inc*() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
-  inline static void inc    (volatile jint*     dest);
-  inline static void inc    (volatile jshort*   dest);
-  inline static void inc    (volatile size_t*   dest);
-  inline static void inc_ptr(volatile intptr_t* dest);
-  inline static void inc_ptr(volatile void*     dest);
+  // Functional behavior is modelled on ++(*dest)
+  template <typename T>
+  inline static void inc(volatile T* dest);
+
+  // The inc_ptr() member functions are deprecated. Use inc() instead.
+  static void inc_ptr(volatile intptr_t* dest) {
+    inc(dest);
+  }
+
+  static void inc_ptr(volatile void*     dest) {
+    inc((volatile intptr_t*)dest);
+  }
 
   // Atomically decrement a location. dec*() provide:
   // <fence> decrement-dest <membar StoreLoad|StoreStore>
-  inline static void dec    (volatile jint*     dest);
-  inline static void dec    (volatile jshort*   dest);
-  inline static void dec    (volatile size_t*   dest);
-  inline static void dec_ptr(volatile intptr_t* dest);
-  inline static void dec_ptr(volatile void*     dest);
+  // Functional behavior is modelled on --(*dest)
+  template <typename T>
+  inline static void dec(volatile T* dest);
+
+  // The dec_ptr() member functions are deprecated. Use dec() instead.
+  static void dec_ptr(volatile intptr_t* dest) {
+    dec(dest);
+  }
+
+  static void dec_ptr(volatile void*     dest) {
+    dec((volatile intptr_t*)dest);
+  }
 
   // Performs atomic exchange of *dest with exchange_value. Returns old
   // prior value of *dest. xchg*() provide:
   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
-  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
-  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
-  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
-  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);
+  template <typename T, typename U>
+  inline static U xchg(T exchange_value, volatile U* dest);
+
+  // The xchg_ptr() member functions are deprecated. Use xchg() instead.
+  static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+    return xchg(exchange_value, dest);
+  }
+
+  static void*    xchg_ptr(void*    exchange_value, volatile void*     dest) {
+    return (void*)xchg((intptr_t)exchange_value, (volatile intptr_t*)dest);
+  }
 
   // Performs atomic compare of *dest and compare_value, and exchanges
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-  inline static jbyte        cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static jint         cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  // See comment above about using jlong atomics on 32-bit platforms
-  inline static jlong        cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static unsigned int cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static intptr_t     cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value, cmpxchg_memory_order order = memory_order_conservative);
-  inline static void*        cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
+  // See comment above about using 64-bit atomics on 32-bit platforms
+  template <typename T, typename U, typename V>
+  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order = memory_order_conservative);
+
+  // The cmpxchg_ptr() member functions are deprecated. Use cmpxchg() instead.
+  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t*  dest,
+                                     intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative) {
+    return cmpxchg(exchange_value, dest, compare_value, order);
+  }
+
+  inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*      dest,
+                                     void*    compare_value, cmpxchg_memory_order order = memory_order_conservative) {
+    return (void*)cmpxchg((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
+  }
 };
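As a usage sketch (variable name invented here), the deprecated _ptr entry points and their templated replacements compare as follows:

// Sketch only: migrating from the deprecated _ptr calls to the templates.
static volatile intptr_t _claimed_index = 0;

intptr_t old_style = Atomic::add_ptr(1, &_claimed_index);           // deprecated
intptr_t new_style = Atomic::add((intptr_t)1, &_claimed_index);     // preferred

Atomic::inc_ptr(&_claimed_index);                                   // deprecated
Atomic::inc(&_claimed_index);                                       // preferred

intptr_t prev = Atomic::cmpxchg_ptr(3, &_claimed_index, 2);         // deprecated
prev = Atomic::cmpxchg((intptr_t)3, &_claimed_index, (intptr_t)2);  // preferred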
 
-// platform specific in-line definitions - must come before shared definitions
+// internal implementation
 
-#include OS_CPU_HEADER(atomic)
+template <typename T, typename U>
+inline void GeneralizedAtomic::store(T store_value, volatile U* dest) {
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  U store_value_cast = store_value;
+  specialized_store(IntegerTypes::cast_to_signed(store_value_cast), reinterpret_cast<volatile Raw*>(dest));
+}
 
-// shared in-line definitions
+template <typename T>
+inline T GeneralizedAtomic::load(volatile T* src) {
+  typedef typename IntegerTypes::Signed<T>::type Raw;
+  return IntegerTypes::cast<T>(specialized_load(reinterpret_cast<const volatile Raw*>(src)));
+}
 
-// size_t casts...
-#if (SIZE_MAX != UINTPTR_MAX)
-#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
-#endif
+template <typename T, typename U>
+inline U GeneralizedAtomic::add(T add_value, volatile U* dst) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  STATIC_ASSERT(IsIntegral<U>::value);
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  // Allow -Wconversion or the like to complain about unsafe conversions.
+  U value = add_value;
+  Raw raw_value = IntegerTypes::cast_to_signed(value);
+  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
+  return IntegerTypes::cast<U>(result);
+}
+
+template <typename T, typename U>
+inline U* GeneralizedAtomic::add(T add_value, U* volatile* dst) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  typedef typename IntegerTypes::Signed<U*>::type Raw;
+  ptrdiff_t value = add_value;
+  Raw raw_value = IntegerTypes::cast_to_signed(value * sizeof(U));
+  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
+  return IntegerTypes::cast<U*>(result);
+}
+
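The pointer flavour above scales add_value by sizeof(U), so it behaves like ordinary pointer arithmetic. A small sketch (names invented):

// Sketch only: atomically bumping a shared cursor over an int array.
static int           _buffer[16];
static int* volatile _cursor = _buffer;

int* next = Atomic::add(2, &_cursor);  // next is the updated cursor: advanced by 2 * sizeof(int) bytes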
+template <typename T>
+inline void GeneralizedAtomic::inc(volatile T* src) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  typedef typename IntegerTypes::Signed<T>::type Raw;
+  specialized_inc(reinterpret_cast<volatile Raw*>(src));
+}
+
+template <typename T>
+inline void GeneralizedAtomic::inc(T* volatile* src) {
+  if (sizeof(T) != 1) {
+    add(1, src);
+  } else {
+    typedef typename IntegerTypes::Signed<T*>::type Raw;
+    specialized_inc(reinterpret_cast<volatile Raw*>(src));
+  }
+}
+
+template <typename T>
+inline void GeneralizedAtomic::dec(volatile T* src) {
+  STATIC_ASSERT(IsIntegral<T>::value);
+  typedef typename IntegerTypes::Signed<T>::type Raw;
+  specialized_dec(reinterpret_cast<volatile Raw*>(src));
+}
+
+template <typename T>
+inline void GeneralizedAtomic::dec(T* volatile* src) {
+  if (sizeof(T) != 1) {
+    add(-1, src);
+  } else {
+    typedef typename IntegerTypes::Signed<T*>::type Raw;
+    specialized_dec(reinterpret_cast<volatile Raw*>(src));
+  }
+}
+
+template <typename T, typename U>
+inline U GeneralizedAtomic::xchg(T exchange_value, volatile U* dest) {
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  U exchange_value_cast = exchange_value;
+  Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value_cast),
+                                reinterpret_cast<volatile Raw*>(dest));
+  return IntegerTypes::cast<U>(result);
+}
+
+template <typename T, typename U, typename V>
+inline U GeneralizedAtomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
+  typedef typename IntegerTypes::Signed<U>::type Raw;
+  U exchange_value_cast = exchange_value;
+  U compare_value_cast = compare_value;
+  Raw result = specialized_cmpxchg(IntegerTypes::cast_to_signed(exchange_value_cast),
+                                   reinterpret_cast<volatile Raw*>(dest),
+                                   IntegerTypes::cast_to_signed(compare_value_cast), order);
+  return IntegerTypes::cast<U>(result);
+}
+
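Because every operation is funnelled through IntegerTypes to a signed raw type of the same size, unsigned operands are handled by these generic templates as well, which is what makes the hand-written unsigned int overloads removed further down unnecessary. A small sketch using the public Atomic::cmpxchg (field name invented):

// Sketch only: cmpxchg on an unsigned 32-bit field goes through the
// int32_t raw path internally; no dedicated unsigned overload is needed.
static volatile uint32_t _state = 0;

uint32_t prev_state = Atomic::cmpxchg(1u, &_state, 0u);  // returns the prior value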
+template <typename T, typename U>
+inline void Atomic::store(T store_value, volatile U* dest) {
+  AtomicImpl::store(store_value, dest);
+}
 
-inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
-  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
+template <typename T>
+inline T Atomic::load(volatile T* src) {
+  return AtomicImpl::load(src);
 }
 
-inline void Atomic::inc(volatile size_t* dest) {
-  inc_ptr((volatile intptr_t*) dest);
+template <typename T, typename U>
+inline U Atomic::add(T add_value, volatile U* dst) {
+  return AtomicImpl::add(add_value, dst);
 }
 
-inline void Atomic::dec(volatile size_t* dest) {
-  dec_ptr((volatile intptr_t*) dest);
+template <typename T>
+inline void Atomic::inc(volatile T* src) {
+  AtomicImpl::inc(src);
 }
 
+template <typename T>
+inline void Atomic::dec(volatile T* src) {
+  AtomicImpl::dec(src);
+}
+
+template <typename T, typename U>
+inline U Atomic::xchg(T exchange_value, volatile U* dest) {
+  return AtomicImpl::xchg(exchange_value, dest);
+}
+
+template <typename T, typename U, typename V>
+inline U Atomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
+  return AtomicImpl::cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+// shared in-line definitions
+
 #ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 /*
- * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
- * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
+ * This is the default implementation of byte-sized cmpxchg. It emulates 8-bit cmpxchg
+ * in terms of 32-bit cmpxchg. Platforms may override this by defining their own inline definition
  * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
  * implementation to be used instead.
  */
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
-                             jbyte compare_value, cmpxchg_memory_order order) {
-  STATIC_ASSERT(sizeof(jbyte) == 1);
-  volatile jint* dest_int =
-      reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
+template <>
+inline int8_t GeneralizedAtomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest,
+                                                             int8_t compare_value, cmpxchg_memory_order order) {
+  volatile int32_t* dest_int =
+      reinterpret_cast<volatile int32_t*>(align_down(dest, sizeof(int32_t)));
   size_t offset = pointer_delta(dest, dest_int, 1);
-  jint cur = *dest_int;
-  jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
+  int32_t cur = *dest_int;
+  int8_t* cur_as_bytes = reinterpret_cast<int8_t*>(&cur);
 
   // current value may not be what we are looking for, so force it
   // to that value so the initial cmpxchg will fail if it is different
   cur_as_bytes[offset] = compare_value;
 
   // always execute a real cmpxchg so that we get the required memory
   // barriers even on initial failure
   do {
     // value to swap in matches current value ...
-    jint new_value = cur;
-    // ... except for the one jbyte we want to update
-    reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;
+    int32_t new_value = cur;
+    // ... except for the one byte we want to update
+    reinterpret_cast<int8_t*>(&new_value)[offset] = exchange_value;
 
-    jint res = cmpxchg(new_value, dest_int, cur, order);
+    int32_t res = cmpxchg(new_value, dest_int, cur, order);
     if (res == cur) break; // success
 
-    // at least one jbyte in the jint changed value, so update
-    // our view of the current jint
+    // at least one byte in the int changed value, so update
+    // our view of the current int
     cur = res;
-    // if our jbyte is still as cur we loop and try again
+    // if our byte is still as cur we loop and try again
   } while (cur_as_bytes[offset] == compare_value);
 
   return cur_as_bytes[offset];
 }
 
 #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
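For example (sketch, names invented), a byte-sized flag can be claimed with the generic cmpxchg; on platforms that do not define VM_HAS_SPECIALIZED_CMPXCHG_BYTE the call is routed through the emulation above:

// Sketch only: claiming a one-byte flag with the templated cmpxchg.
static volatile int8_t _claimed = 0;

bool try_claim() {
  return Atomic::cmpxchg((int8_t)1, &_claimed, (int8_t)0) == 0;
}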
 
-inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
-                         volatile unsigned int* dest, unsigned int compare_value,
-                         cmpxchg_memory_order order) {
-  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
-  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
-                                       (jint)compare_value, order);
-}
-
-inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
+template <>
+inline int16_t GeneralizedAtomic::specialized_add<int16_t>(int16_t add_value, volatile int16_t* dest) {
   // Most platforms do not support atomic add on a 2-byte value. However,
   // if the value occupies the most significant 16 bits of an aligned 32-bit
   // word, then we can do this with an atomic add of (add_value << 16)
   // to the 32-bit word.
   //

@@ -208,22 +428,24 @@
   // in case of overflow/underflow.
   //
   // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 #ifdef VM_LITTLE_ENDIAN
   assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest-1));
 #else
   assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest));
 #endif
-  return (jshort)(new_value >> 16); // preserves sign
+  return (int16_t)(new_value >> 16); // preserves sign
 }
 
-inline void Atomic::inc(volatile jshort* dest) {
-  (void)add(1, dest);
+template <>
+inline void GeneralizedAtomic::specialized_inc<int16_t>(volatile int16_t* dest) {
+  (void)add(int16_t(1), dest);
 }
 
-inline void Atomic::dec(volatile jshort* dest) {
-  (void)add(-1, dest);
+template <>
+inline void GeneralizedAtomic::specialized_dec<int16_t>(volatile int16_t* dest) {
+  (void)add(int16_t(-1), dest);
 }
 
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
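The 16-bit specializations above rely on the layout produced by ATOMIC_SHORT_PAIR (macros.hpp), which arranges the two shorts so that the atomically updated one occupies the most significant 16 bits of an aligned 32-bit word, i.e. the half that the shifted 32-bit add targets. A usage sketch, modelled on how Symbol pairs its _refcount and _length fields:

// Sketch only: the atomic short shares an aligned 32-bit word with a plain
// short, so Atomic::add/inc/dec on it hit the intended half-word.
ATOMIC_SHORT_PAIR(
  volatile short _refcount,  // updated via Atomic::inc/dec/add
  unsigned short _length     // never updated atomically
);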