
src/share/vm/prims/unsafe.cpp

rev 12906 : [mq]: gc_interface

@@ -26,14 +26,16 @@
 #include "classfile/classFileStream.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.inline.hpp"
+#include "oops/typeArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jni.h"
 #include "prims/jvm.h"
 #include "prims/unsafe.hpp"
+#include "runtime/access.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"

@@ -41,13 +43,10 @@
 #include "services/threadService.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
 
 /**
  * Implementation of the jdk.internal.misc.Unsafe class
  */
 

@@ -96,13 +95,11 @@
 
 static inline jlong field_offset_from_byte_offset(jlong byte_offset) {
   return byte_offset;
 }
 
-static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
-  jlong byte_offset = field_offset_to_byte_offset(field_offset);
-
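+// Debug-only sanity check: for a non-null base object the byte offset must
+// lie in [0, MAX_OBJECT_SIZE] and inside the object itself.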
+static inline void assert_sane_byte_offset(oop p, jlong byte_offset) {
 #ifdef ASSERT
   if (p != NULL) {
     assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
     if (byte_offset == (jint)byte_offset) {
       void* ptr_plus_disp = (address)p + byte_offset;

@@ -111,11 +108,15 @@
     }
     jlong p_size = HeapWordSize * (jlong)(p->size());
     assert(byte_offset < p_size, "Unsafe access: offset " INT64_FORMAT " > object's size " INT64_FORMAT, byte_offset, p_size);
   }
 #endif
+}
 
+static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
+  jlong byte_offset = field_offset_to_byte_offset(field_offset);
+  assert_sane_byte_offset(p, byte_offset);
   if (sizeof(char*) == sizeof(jint)) {   // (this constant folds!)
     return (address)p + (jint) byte_offset;
   } else {
     return (address)p +        byte_offset;
   }

@@ -138,288 +139,145 @@
  *
  * Normalizes values and wraps accesses in
  * JavaThread::doing_unsafe_access() if needed.
  */
 class MemoryAccess : StackObj {
-  JavaThread* _thread;
-  jobject _obj;
-  jlong _offset;
-
-  // Resolves and returns the address of the memory access
-  void* addr() {
-    return index_oop_from_field_offset_long(JNIHandles::resolve(_obj), _offset);
-  }
+  JavaThread* const _thread;
+  const oop _obj;
+  const ptrdiff_t _offset;
 
   template <typename T>
-  T normalize_for_write(T x) {
+  T normalize_for_write(T x) const {
     return x;
   }
 
-  jboolean normalize_for_write(jboolean x) {
+  jboolean normalize_for_write(jboolean x) const {
     return x & 1;
   }
 
   template <typename T>
-  T normalize_for_read(T x) {
+  T normalize_for_read(T x) const {
     return x;
   }
 
-  jboolean normalize_for_read(jboolean x) {
+  jboolean normalize_for_read(jboolean x) const {
     return x != 0;
   }
 
   /**
-   * Helper class to wrap memory accesses in JavaThread::doing_unsafe_access()
+   * Helper class to wrap memory accesses in JavaThread::doing_unsafe_access().
+   * This guards native/off-heap accesses, which may raise SIGBUS if they touch
+   * memory-mapped file data in a region of the file that has been truncated
+   * and is now invalid.
    */
   class GuardUnsafeAccess {
-    JavaThread* _thread;
-    bool _active;
+    JavaThread* const _thread;
 
   public:
-    GuardUnsafeAccess(JavaThread* thread, jobject _obj) : _thread(thread) {
-      if (JNIHandles::resolve(_obj) == NULL) {
-        // native/off-heap access which may raise SIGBUS if accessing
-        // memory mapped file data in a region of the file which has
-        // been truncated and is now invalid
-        _thread->set_doing_unsafe_access(true);
-        _active = true;
-      } else {
-        _active = false;
-      }
+    GuardUnsafeAccess(JavaThread* thread) : _thread(thread) {
+      _thread->set_doing_unsafe_access(true);
     }
 
     ~GuardUnsafeAccess() {
-      if (_active) {
-        _thread->set_doing_unsafe_access(false);
-      }
+      _thread->set_doing_unsafe_access(false);
     }
   };
 
 public:
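+  // Resolves the base object eagerly to a raw oop; this relies on the access
+  // paths below never safepointing, so the oop cannot move while it is in use.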
   MemoryAccess(JavaThread* thread, jobject obj, jlong offset)
-    : _thread(thread), _obj(obj), _offset(offset) {
+    : _thread(thread), _obj(JNIHandles::resolve(obj)), _offset((ptrdiff_t)offset) {
+    assert_sane_byte_offset(_obj, offset);
   }
 
-  template <typename T>
-  T get() {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    T x = normalize_for_read(*p);
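+  // A null base oop denotes a native/off-heap access at an absolute address;
+  // anything else is an on-heap access that must go through the Access API.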
+  bool on_heap() const {
+    return !oopDesc::is_null(_obj);
+  }
 
-    return x;
+  template <typename T>
+  T get() const {
+    T x;
+    if (on_heap()) {
+      x = HeapAccess<ACCESS_ON_ANONYMOUS>::load_at(_obj, _offset);
+    } else {
+      GuardUnsafeAccess guard(_thread);
+      x = RawAccess<>::load((T*)_offset);
+    }
+    return normalize_for_read(x);
   }
 
   template <typename T>
   void put(T x) {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    *p = normalize_for_write(x);
+    x = normalize_for_write(x);
+    if (on_heap()) {
+      HeapAccess<ACCESS_ON_ANONYMOUS>::store_at(_obj, _offset, x);
+    } else {
+      GuardUnsafeAccess guard(_thread);
+      RawAccess<>::store((T*)_offset, x);
+    }
   }
 
 
   template <typename T>
-  T get_volatile() {
+  T get_volatile() const {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-      OrderAccess::fence();
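+    // A seq_cst load is expected to subsume the explicit IRIW fence and
+    // load_acquire pair that the old code issued by hand.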
+    T x;
+    if (on_heap()) {
+      x = HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::load_at(_obj, _offset);
+    } else {
+      GuardUnsafeAccess guard(_thread);
+      x = RawAccess<MO_SEQ_CST>::load((T*)_offset);
     }
-
-    T x = OrderAccess::load_acquire((volatile T*)p);
-
     return normalize_for_read(x);
   }
 
   template <typename T>
   void put_volatile(T x) {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    T* p = (T*)addr();
-
-    OrderAccess::release_store_fence((volatile T*)p, normalize_for_write(x));
+    x = normalize_for_write(x);
+    if (on_heap()) {
+      HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::store_at(_obj, _offset, x);
+    } else {
+      GuardUnsafeAccess guard(_thread);
+      RawAccess<MO_SEQ_CST>::store((T*)_offset, x);
+    }
   }
-
-
-#ifndef SUPPORTS_NATIVE_CX8
-  jlong get_jlong_locked() {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong* p = (jlong*)addr();
-
-    jlong x = Atomic::load(p);
-
-    return x;
-  }
-
-  void put_jlong_locked(jlong x) {
-    GuardUnsafeAccess guard(_thread, _obj);
-
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong* p = (jlong*)addr();
-
-    Atomic::store(normalize_for_write(x),  p);
-  }
-#endif
 };
 
-// Get/PutObject must be special-cased, since it works with handles.
-
-// We could be accessing the referent field in a reference
-// object. If G1 is enabled then we need to register non-null
-// referent with the SATB barrier.
-
-#if INCLUDE_ALL_GCS
-static bool is_java_lang_ref_Reference_access(oop o, jlong offset) {
-  if (offset == java_lang_ref_Reference::referent_offset && o != NULL) {
-    Klass* k = o->klass();
-    if (InstanceKlass::cast(k)->reference_type() != REF_NONE) {
-      assert(InstanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
-      return true;
-    }
-  }
-  return false;
-}
-#endif
-
-static void ensure_satb_referent_alive(oop o, jlong offset, oop v) {
-#if INCLUDE_ALL_GCS
-  if (UseG1GC && v != NULL && is_java_lang_ref_Reference_access(o, offset)) {
-    G1SATBCardTableModRefBS::enqueue(v);
-  }
-#endif
-}
-
 // These functions allow a null base pointer with an arbitrary address.
 // But if the base pointer is non-null, the offset should make some sense.
 // That is, it should be in the range [0, MAX_OBJECT_SIZE].
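+// For example, getInt(null, addr) performs a raw read of the jint at absolute
+// address addr, while getInt(obj, off) reads the field of obj at offset off.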
 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
   oop p = JNIHandles::resolve(obj);
-  oop v;
-
-  if (UseCompressedOops) {
-    narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset);
-    v = oopDesc::decode_heap_oop(n);
-  } else {
-    v = *(oop*)index_oop_from_field_offset_long(p, offset);
-  }
-
-  ensure_satb_referent_alive(p, offset, v);
-
+  assert_sane_byte_offset(p, offset);
+  oop v = HeapAccess<ACCESS_ON_ANONYMOUS>::oop_load_at(p, (ptrdiff_t)offset);
   return JNIHandles::make_local(env, v);
 } UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_PutObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
-
-  if (UseCompressedOops) {
-    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
-  } else {
-    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
-  }
+  assert_sane_byte_offset(p, offset);
+  HeapAccess<ACCESS_ON_ANONYMOUS>::oop_store_at(p, (ptrdiff_t)offset, x);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
   oop p = JNIHandles::resolve(obj);
-  void* addr = index_oop_from_field_offset_long(p, offset);
-
-  volatile oop v;
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    OrderAccess::fence();
-  }
-
-  if (UseCompressedOops) {
-    volatile narrowOop n = *(volatile narrowOop*) addr;
-    (void)const_cast<oop&>(v = oopDesc::decode_heap_oop(n));
-  } else {
-    (void)const_cast<oop&>(v = *(volatile oop*) addr);
-  }
-
-  ensure_satb_referent_alive(p, offset, v);
-
-  OrderAccess::acquire();
+  assert_sane_byte_offset(p, offset);
+  oop v = HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::oop_load_at(p, (ptrdiff_t)offset);
   return JNIHandles::make_local(env, v);
 } UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_PutObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
-  void* addr = index_oop_from_field_offset_long(p, offset);
-  OrderAccess::release();
-
-  if (UseCompressedOops) {
-    oop_store((narrowOop*)addr, x);
-  } else {
-    oop_store((oop*)addr, x);
-  }
-
-  OrderAccess::fence();
+  assert_sane_byte_offset(p, offset);
+  HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::oop_store_at(p, (ptrdiff_t)offset, x);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe, jlong addr)) {
   oop v = *(oop*) (address) addr;
-
   return JNIHandles::make_local(env, v);
 } UNSAFE_END
 
-#ifndef SUPPORTS_NATIVE_CX8
-
-// VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
-//
-// On platforms which do not support atomic compare-and-swap of jlong (8 byte)
-// values we have to use a lock-based scheme to enforce atomicity. This has to be
-// applied to all Unsafe operations that set the value of a jlong field. Even so
-// the compareAndSwapLong operation will not be atomic with respect to direct stores
-// to the field from Java code. It is important therefore that any Java code that
-// utilizes these Unsafe jlong operations does not perform direct stores. To permit
-// direct loads of the field from Java code we must also use Atomic::store within the
-// locked regions. And for good measure, in case there are direct stores, we also
-// employ Atomic::load within those regions. Note that the field in question must be
-// volatile and so must have atomic load/store accesses applied at the Java level.
-//
-// The locking scheme could utilize a range of strategies for controlling the locking
-// granularity: from a lock per-field through to a single global lock. The latter is
-// the simplest and is used for the current implementation. Note that the Java object
-// that contains the field, can not, in general, be used for locking. To do so can lead
-// to deadlocks as we may introduce locking into what appears to the Java code to be a
-// lock-free path.
-//
-// As all the locked-regions are very short and themselves non-blocking we can treat
-// them as leaf routines and elide safepoint checks (ie we don't perform any thread
-// state transitions even when blocking for the lock). Note that if we do choose to
-// add safepoint checks and thread state transitions, we must ensure that we calculate
-// the address of the field _after_ we have acquired the lock, else the object may have
-// been moved by the GC
-
-UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
-  if (VM_Version::supports_cx8()) {
-    return MemoryAccess(thread, obj, offset).get_volatile<jlong>();
-  } else {
-    return MemoryAccess(thread, obj, offset).get_jlong_locked();
-  }
-} UNSAFE_END
-
-UNSAFE_ENTRY(void, Unsafe_PutLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
-  if (VM_Version::supports_cx8()) {
-    MemoryAccess(thread, obj, offset).put_volatile<jlong>(x);
-  } else {
-    MemoryAccess(thread, obj, offset).put_jlong_locked(x);
-  }
-} UNSAFE_END
-
-#endif // not SUPPORTS_NATIVE_CX8
-
 UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
 #ifdef VM_LITTLE_ENDIAN
   return false;
 #else
   return true;

@@ -468,17 +326,14 @@
 DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean)
 DEFINE_GETSETOOP_VOLATILE(jbyte, Byte)
 DEFINE_GETSETOOP_VOLATILE(jshort, Short);
 DEFINE_GETSETOOP_VOLATILE(jchar, Char);
 DEFINE_GETSETOOP_VOLATILE(jint, Int);
+DEFINE_GETSETOOP_VOLATILE(jlong, Long);
 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);
 
-#ifdef SUPPORTS_NATIVE_CX8
-DEFINE_GETSETOOP_VOLATILE(jlong, Long);
-#endif
-
 #undef DEFINE_GETSETOOP_VOLATILE
 
 UNSAFE_LEAF(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe)) {
   OrderAccess::acquire();
 } UNSAFE_END

@@ -974,90 +829,42 @@
 
 UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
   oop x = JNIHandles::resolve(x_h);
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
-  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
-  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
-  if (res == e) {
-    update_barrier_set((void*)addr, x);
-  }
+  assert_sane_byte_offset(p, offset);
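+  // oop_cas_at applies the GC barriers that the removed update_barrier_set()
+  // call used to issue by hand after a successful swap.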
+  oop res = HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::oop_cas_at(x, p, (ptrdiff_t)offset, e);
   return JNIHandles::make_local(env, res);
 } UNSAFE_END
 
-UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
-  oop p = JNIHandles::resolve(obj);
-  jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);
-
-  return (jint)(Atomic::cmpxchg(x, addr, e));
-} UNSAFE_END
-
-UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
-  Handle p(THREAD, JNIHandles::resolve(obj));
-  jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);
-
-#ifdef SUPPORTS_NATIVE_CX8
-  return (jlong)(Atomic::cmpxchg(x, addr, e));
-#else
-  if (VM_Version::supports_cx8()) {
-    return (jlong)(Atomic::cmpxchg(x, addr, e));
-  } else {
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong val = Atomic::load(addr);
-    if (val == e) {
-      Atomic::store(x, addr);
-    }
-    return val;
-  }
-#endif
-} UNSAFE_END
-
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
   oop x = JNIHandles::resolve(x_h);
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
-  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
-  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
-  if (res != e) {
-    return false;
-  }
-
-  update_barrier_set((void*)addr, x);
-
-  return true;
-} UNSAFE_END
-
-UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
-  oop p = JNIHandles::resolve(obj);
-  jint* addr = (jint *)index_oop_from_field_offset_long(p, offset);
-
-  return (jint)(Atomic::cmpxchg(x, addr, e)) == e;
+  assert_sane_byte_offset(p, offset);
+  oop res = HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::oop_cas_at(x, p, (ptrdiff_t)offset, e);
+  return res == e;
 } UNSAFE_END
 
-UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
-  Handle p(THREAD, JNIHandles::resolve(obj));
-  jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);
-
-#ifdef SUPPORTS_NATIVE_CX8
-  return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
-#else
-  if (VM_Version::supports_cx8()) {
-    return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
-  } else {
-    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
-
-    jlong val = Atomic::load(addr);
-    if (val != e) {
-      return false;
-    }
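+// Defines a CompareAndExchange entry (returns the value witnessed in memory)
+// and a CompareAndSwap entry (returns success as a jboolean) for a primitive
+// java_type in terms of the Access API.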
+#define DEFINE_ATOMICOOP(java_type, Type) \
+ \
+UNSAFE_ENTRY(java_type, Unsafe_CompareAndExchange##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type e, java_type x)) { \
+  Handle p(THREAD, JNIHandles::resolve(obj)); \
+  assert_sane_byte_offset(p(), offset); \
+  return HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::cas_at(x, p(), (ptrdiff_t)offset, e); \
+} UNSAFE_END \
+ \
+UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwap##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type e, java_type x)) { \
+  Handle p(THREAD, JNIHandles::resolve(obj)); \
+  assert_sane_byte_offset(p(), offset); \
+  return HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::cas_at(x, p(), (ptrdiff_t)offset, e) == e; \
+} UNSAFE_END \
+ \
+// END DEFINE_ATOMICOOP.
 
-    Atomic::store(x, addr);
-    return true;
-  }
-#endif
-} UNSAFE_END
+DEFINE_ATOMICOOP(jint, Int);
+DEFINE_ATOMICOOP(jlong, Long);
 
 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) {
   EventThreadPark event;
   HOTSPOT_THREAD_PARK_BEGIN((uintptr_t) thread->parker(), (int) isAbsolute, time);
 