src/share/vm/prims/unsafe.cpp
rev 12906 : [mq]: gc_interface

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classFileStream.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayOop.inline.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "prims/jni.h"
  33 #include "prims/jvm.h"
  34 #include "prims/unsafe.hpp"
  35 #include "runtime/atomic.hpp"
  36 #include "runtime/globals.hpp"
  37 #include "runtime/interfaceSupport.hpp"
  38 #include "runtime/orderAccess.inline.hpp"
  39 #include "runtime/reflection.hpp"
  40 #include "runtime/vm_version.hpp"
  41 #include "services/threadService.hpp"
  42 #include "trace/tracing.hpp"
  43 #include "utilities/copy.hpp"
  44 #include "utilities/dtrace.hpp"
  45 #include "utilities/macros.hpp"
  46 #if INCLUDE_ALL_GCS
  47 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  48 #endif // INCLUDE_ALL_GCS
  49 
  50 /**
  51  * Implementation of the jdk.internal.misc.Unsafe class
  52  */
  53 
  54 
  55 #define MAX_OBJECT_SIZE \
  56   ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
  57     + ((julong)max_jint * sizeof(double)) )
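// Illustrative arithmetic (not in the source): assuming a 64-bit VM with
// compressed class pointers, arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize
// is 16 bytes, so this bound works out to 16 + (2^31 - 1) * 8 bytes, a little
// under 16 GiB; a deliberately generous ceiling on any single object's size.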
  58 
  59 
  60 #define UNSAFE_ENTRY(result_type, header) \
  61   JVM_ENTRY(static result_type, header)
  62 
  63 #define UNSAFE_LEAF(result_type, header) \
  64   JVM_LEAF(static result_type, header)
  65 
  66 #define UNSAFE_END JVM_END
  67 
  68 
  81 
  82 
  83 // Note: The VM's obj_field and related accessors use byte-scaled
  84 // ("unscaled") offsets, just as the unsafe methods do.
  85 
  86 // However, the method Unsafe.fieldOffset explicitly declines to
  87 // guarantee this.  The field offset values manipulated by the Java user
  88 // through the Unsafe API are opaque cookies that just happen to be byte
  89 // offsets.  We represent this state of affairs by passing the cookies
  90 // through conversion functions when going between the VM and the Unsafe API.
  91 // The conversion functions just happen to be no-ops at present.
  92 
  93 static inline jlong field_offset_to_byte_offset(jlong field_offset) {
  94   return field_offset;
  95 }
  96 
  97 static inline jlong field_offset_from_byte_offset(jlong byte_offset) {
  98   return byte_offset;
  99 }
 100 
 101 static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
 102   jlong byte_offset = field_offset_to_byte_offset(field_offset);
 103 
 104 #ifdef ASSERT
 105   if (p != NULL) {
 106     assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
 107     if (byte_offset == (jint)byte_offset) {
 108       void* ptr_plus_disp = (address)p + byte_offset;
 109       assert((void*)p->obj_field_addr<oop>((jint)byte_offset) == ptr_plus_disp,
 110              "raw [ptr+disp] must be consistent with oop::field_base");
 111     }
 112     jlong p_size = HeapWordSize * (jlong)(p->size());
 113     assert(byte_offset < p_size, "Unsafe access: offset " INT64_FORMAT " > object's size " INT64_FORMAT, byte_offset, p_size);
 114   }
 115 #endif
 116 
 117   if (sizeof(char*) == sizeof(jint)) {   // (this constant folds!)
 118     return (address)p + (jint) byte_offset;
 119   } else {
 120     return (address)p +        byte_offset;
 121   }
 122 }
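// Usage sketch (illustrative, names hypothetical): with a NULL base oop the
// offset is treated as an absolute native address; with a non-NULL base it
// is a displacement into that object.
//
//   void* off_heap = index_oop_from_field_offset_long(NULL, (jlong)(intptr_t)raw_ptr);
//   void* in_obj   = index_oop_from_field_offset_long(p, field_offset);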
 123 
 124 // Externally callable versions:
 125 // (Use these in compiler intrinsics which emulate unsafe primitives.)
 126 jlong Unsafe_field_offset_to_byte_offset(jlong field_offset) {
 127   return field_offset;
 128 }
 129 jlong Unsafe_field_offset_from_byte_offset(jlong byte_offset) {
 130   return byte_offset;
 131 }
 132 
 133 
 134 ///// Data read/writes on the Java heap and in native (off-heap) memory
 135 
 136 /**
 137  * Helper class for accessing memory.
 138  *
 139  * Normalizes values and wraps accesses in
 140  * JavaThread::doing_unsafe_access() if needed.
 141  */
 142 class MemoryAccess : StackObj {
 143   JavaThread* _thread;
 144   jobject _obj;
 145   jlong _offset;
 146 
 147   // Resolves and returns the address of the memory access
 148   void* addr() {
 149     return index_oop_from_field_offset_long(JNIHandles::resolve(_obj), _offset);
 150   }
 151 
 152   template <typename T>
 153   T normalize_for_write(T x) {
 154     return x;
 155   }
 156 
 157   jboolean normalize_for_write(jboolean x) {
 158     return x & 1;
 159   }
 160 
 161   template <typename T>
 162   T normalize_for_read(T x) {
 163     return x;
 164   }
 165 
 166   jboolean normalize_for_read(jboolean x) {
 167     return x != 0;
 168   }
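// Illustrative (not in the source): why jboolean needs normalizing. A Java
// boolean must hold 0 or 1, but a byte seen through Unsafe can be any value:
//
//   jboolean raw = 0x82;                 // hypothetical byte from memory
//   jboolean w   = raw & 1;              // normalize_for_write -> 0
//   jboolean r   = (jboolean)(raw != 0); // normalize_for_read  -> 1 (true)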
 169 
 170   /**
 171    * Helper class to wrap memory accesses in JavaThread::doing_unsafe_access()
 172    */
 173   class GuardUnsafeAccess {
 174     JavaThread* _thread;
 175     bool _active;
 176 
 177   public:
 178     GuardUnsafeAccess(JavaThread* thread, jobject _obj) : _thread(thread) {
 179       if (JNIHandles::resolve(_obj) == NULL) {
 180         // native/off-heap access which may raise SIGBUS if accessing
 181         // memory mapped file data in a region of the file which has
 182         // been truncated and is now invalid
 183         _thread->set_doing_unsafe_access(true);
 184         _active = true;
 185       } else {
 186         _active = false;
 187       }
 188     }
 189 
 190     ~GuardUnsafeAccess() {
 191       if (_active) {
 192         _thread->set_doing_unsafe_access(false);
 193       }
 194     }
 195   };
 196 
 197 public:
 198   MemoryAccess(JavaThread* thread, jobject obj, jlong offset)
 199     : _thread(thread), _obj(obj), _offset(offset) {
 200   }
 201 
 202   template <typename T>
 203   T get() {
 204     GuardUnsafeAccess guard(_thread, _obj);
 205 
 206     T* p = (T*)addr();
 207 
 208     T x = normalize_for_read(*p);
 209 
 210     return x;
 211   }
 212 
 213   template <typename T>
 214   void put(T x) {
 215     GuardUnsafeAccess guard(_thread, _obj);
 216 
 217     T* p = (T*)addr();
 218 
 219     *p = normalize_for_write(x);
 220   }
 221 
 222 
 223   template <typename T>
 224   T get_volatile() {
 225     GuardUnsafeAccess guard(_thread, _obj);
 226 
 227     T* p = (T*)addr();
 228 
 229     if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 230       OrderAccess::fence();
 231     }
 232 
 233     T x = OrderAccess::load_acquire((volatile T*)p);
 234 
 235     return normalize_for_read(x);
 236   }
 237 
 238   template <typename T>
 239   void put_volatile(T x) {
 240     GuardUnsafeAccess guard(_thread, _obj);
 241 
 242     T* p = (T*)addr();
 243 
 244     OrderAccess::release_store_fence((volatile T*)p, normalize_for_write(x));
 245   }
 246 
 247 
 248 #ifndef SUPPORTS_NATIVE_CX8
 249   jlong get_jlong_locked() {
 250     GuardUnsafeAccess guard(_thread, _obj);
 251 
 252     MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
 253 
 254     jlong* p = (jlong*)addr();
 255 
 256     jlong x = Atomic::load(p);
 257 
 258     return x;
 259   }
 260 
 261   void put_jlong_locked(jlong x) {
 262     GuardUnsafeAccess guard(_thread, _obj);
 263 
 264     MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
 265 
 266     jlong* p = (jlong*)addr();
 267 
 268     Atomic::store(normalize_for_write(x),  p);
 269   }
 270 #endif
 271 };
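// Usage sketch (illustrative): the Unsafe entry points below construct a
// transient MemoryAccess per call, e.g.
//
//   return MemoryAccess(thread, obj, offset).get_volatile<jint>();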
 272 
 273 // Get/PutObject must be special-cased, since it works with handles.
 274 
 275 // We could be accessing the referent field in a reference
 276 // object. If G1 is enabled then we need to register non-null
 277 // referent with the SATB barrier.
 278 
 279 #if INCLUDE_ALL_GCS
 280 static bool is_java_lang_ref_Reference_access(oop o, jlong offset) {
 281   if (offset == java_lang_ref_Reference::referent_offset && o != NULL) {
 282     Klass* k = o->klass();
 283     if (InstanceKlass::cast(k)->reference_type() != REF_NONE) {
 284       assert(InstanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
 285       return true;
 286     }
 287   }
 288   return false;
 289 }
 290 #endif
 291 
 292 static void ensure_satb_referent_alive(oop o, jlong offset, oop v) {
 293 #if INCLUDE_ALL_GCS
 294   if (UseG1GC && v != NULL && is_java_lang_ref_Reference_access(o, offset)) {
 295     G1SATBCardTableModRefBS::enqueue(v);
 296   }
 297 #endif
 298 }
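// Illustrative note (not in the source): the case this guards is Java code
// loading java.lang.ref.Reference.referent via Unsafe.getObject, with the
// offset obtained from objectFieldOffset. Such a load makes the referent
// strongly reachable, so under G1 it must be SATB-enqueued or concurrent
// marking could miss it.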
 299 
 300 // These functions allow a null base pointer with an arbitrary address.
 301 // But if the base pointer is non-null, the offset should make some sense.
 302 // That is, it should be in the range [0, MAX_OBJECT_SIZE].
 303 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
 304   oop p = JNIHandles::resolve(obj);
 305   oop v;
 306 
 307   if (UseCompressedOops) {
 308     narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset);
 309     v = oopDesc::decode_heap_oop(n);
 310   } else {
 311     v = *(oop*)index_oop_from_field_offset_long(p, offset);
 312   }
 313 
 314   ensure_satb_referent_alive(p, offset, v);
 315 
 316   return JNIHandles::make_local(env, v);
 317 } UNSAFE_END
 318 
 319 UNSAFE_ENTRY(void, Unsafe_PutObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
 320   oop x = JNIHandles::resolve(x_h);
 321   oop p = JNIHandles::resolve(obj);
 322 
 323   if (UseCompressedOops) {
 324     oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
 325   } else {
 326     oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
 327   }
 328 } UNSAFE_END
 329 
 330 UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
 331   oop p = JNIHandles::resolve(obj);
 332   void* addr = index_oop_from_field_offset_long(p, offset);
 333 
 334   volatile oop v;
 335 
 336   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 337     OrderAccess::fence();
 338   }
 339 
 340   if (UseCompressedOops) {
 341     volatile narrowOop n = *(volatile narrowOop*) addr;
 342     (void)const_cast<oop&>(v = oopDesc::decode_heap_oop(n));
 343   } else {
 344     (void)const_cast<oop&>(v = *(volatile oop*) addr);
 345   }
 346 
 347   ensure_satb_referent_alive(p, offset, v);
 348 
 349   OrderAccess::acquire();
 350   return JNIHandles::make_local(env, v);
 351 } UNSAFE_END
 352 
 353 UNSAFE_ENTRY(void, Unsafe_PutObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
 354   oop x = JNIHandles::resolve(x_h);
 355   oop p = JNIHandles::resolve(obj);
 356   void* addr = index_oop_from_field_offset_long(p, offset);
 357   OrderAccess::release();
 358 
 359   if (UseCompressedOops) {
 360     oop_store((narrowOop*)addr, x);
 361   } else {
 362     oop_store((oop*)addr, x);
 363   }
 364 
 365   OrderAccess::fence();
 366 } UNSAFE_END
 367 
 368 UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe, jlong addr)) {
 369   oop v = *(oop*) (address) addr;
 370 
 371   return JNIHandles::make_local(env, v);
 372 } UNSAFE_END
 373 
 374 #ifndef SUPPORTS_NATIVE_CX8
 375 
 376 // VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
 377 //
 378 // On platforms which do not support atomic compare-and-swap of jlong (8 byte)
 379 // values we have to use a lock-based scheme to enforce atomicity. This has to be
 380 // applied to all Unsafe operations that set the value of a jlong field. Even so
 381 // the compareAndSwapLong operation will not be atomic with respect to direct stores
 382 // to the field from Java code. It is important therefore that any Java code that
 383 // utilizes these Unsafe jlong operations does not perform direct stores. To permit
 384 // direct loads of the field from Java code we must also use Atomic::store within the
 385 // locked regions. And for good measure, in case there are direct stores, we also
 386 // employ Atomic::load within those regions. Note that the field in question must be
 387 // volatile and so must have atomic load/store accesses applied at the Java level.
 388 //
 389 // The locking scheme could utilize a range of strategies for controlling the locking
 390 // granularity: from a lock per-field through to a single global lock. The latter is
 391 // the simplest and is used for the current implementation. Note that the Java object
 392 // that contains the field, can not, in general, be used for locking. To do so can lead
 393 // to deadlocks as we may introduce locking into what appears to the Java code to be a
 394 // lock-free path.
 395 //
 396 // As all the locked-regions are very short and themselves non-blocking we can treat
 397 // them as leaf routines and elide safepoint checks (ie we don't perform any thread
 398 // state transitions even when blocking for the lock). Note that if we do choose to
 399 // add safepoint checks and thread state transitions, we must ensure that we calculate
 400 // the address of the field _after_ we have acquired the lock, else the object may have
 401 // been moved by the GC
 402 
 403 UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
 404   if (VM_Version::supports_cx8()) {
 405     return MemoryAccess(thread, obj, offset).get_volatile<jlong>();
 406   } else {
 407     return MemoryAccess(thread, obj, offset).get_jlong_locked();
 408   }
 409 } UNSAFE_END
 410 
 411 UNSAFE_ENTRY(void, Unsafe_PutLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
 412   if (VM_Version::supports_cx8()) {
 413     MemoryAccess(thread, obj, offset).put_volatile<jlong>(x);
 414   } else {
 415     MemoryAccess(thread, obj, offset).put_jlong_locked(x);
 416   }
 417 } UNSAFE_END
 418 
 419 #endif // not SUPPORTS_NATIVE_CX8
 420 
 421 UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
 422 #ifdef VM_LITTLE_ENDIAN
 423   return false;
 424 #else
 425   return true;
 426 #endif
 427 } UNSAFE_END
 428 
 429 UNSAFE_LEAF(jint, Unsafe_unalignedAccess0(JNIEnv *env, jobject unsafe)) {
 430   return UseUnalignedAccesses;
 431 } UNSAFE_END
 432 
 433 #define DEFINE_GETSETOOP(java_type, Type) \
 434  \
 435 UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
 436   return MemoryAccess(thread, obj, offset).get<java_type>(); \
 437 } UNSAFE_END \
 438  \
 439 UNSAFE_ENTRY(void, Unsafe_Put##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
 440   MemoryAccess(thread, obj, offset).put<java_type>(x); \
 453 
 454 #undef DEFINE_GETSETOOP
 455 
 456 #define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
 457  \
 458 UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
 459   return MemoryAccess(thread, obj, offset).get_volatile<java_type>(); \
 460 } UNSAFE_END \
 461  \
 462 UNSAFE_ENTRY(void, Unsafe_Put##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
 463   MemoryAccess(thread, obj, offset).put_volatile<java_type>(x); \
 464 } UNSAFE_END \
 465  \
 466 // END DEFINE_GETSETOOP_VOLATILE.
 467 
 468 DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean)
 469 DEFINE_GETSETOOP_VOLATILE(jbyte, Byte)
 470 DEFINE_GETSETOOP_VOLATILE(jshort, Short);
 471 DEFINE_GETSETOOP_VOLATILE(jchar, Char);
 472 DEFINE_GETSETOOP_VOLATILE(jint, Int);
 473 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 474 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);
 475 
 476 #ifdef SUPPORTS_NATIVE_CX8
 477 DEFINE_GETSETOOP_VOLATILE(jlong, Long);
 478 #endif
 479 
 480 #undef DEFINE_GETSETOOP_VOLATILE
 481 
 482 UNSAFE_LEAF(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe)) {
 483   OrderAccess::acquire();
 484 } UNSAFE_END
 485 
 486 UNSAFE_LEAF(void, Unsafe_StoreFence(JNIEnv *env, jobject unsafe)) {
 487   OrderAccess::release();
 488 } UNSAFE_END
 489 
 490 UNSAFE_LEAF(void, Unsafe_FullFence(JNIEnv *env, jobject unsafe)) {
 491   OrderAccess::fence();
 492 } UNSAFE_END
 493 
 494 ////// Allocation requests
 495 
 496 UNSAFE_ENTRY(jobject, Unsafe_AllocateInstance(JNIEnv *env, jobject unsafe, jclass cls)) {
 497   ThreadToNativeFromVM ttnfv(thread);
 498   return env->AllocObject(cls);
 499 } UNSAFE_END


 959   }
 960 
 961   // let caller initialize it as needed...
 962 
 963   return (jclass) res_jh;
 964 } UNSAFE_END
 965 
 966 
 967 
 968 UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr)) {
 969   ThreadToNativeFromVM ttnfv(thread);
 970   env->Throw(thr);
 971 } UNSAFE_END
 972 
 973 // JSR166 ------------------------------------------------------------------
 974 
 975 UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
 976   oop x = JNIHandles::resolve(x_h);
 977   oop e = JNIHandles::resolve(e_h);
 978   oop p = JNIHandles::resolve(obj);
 979   HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
 980   oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
 981   if (res == e) {
 982     update_barrier_set((void*)addr, x);
 983   }
 984   return JNIHandles::make_local(env, res);
 985 } UNSAFE_END
 986 
 987 UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
 988   oop p = JNIHandles::resolve(obj);
 989   jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);
 990 
 991   return (jint)(Atomic::cmpxchg(x, addr, e));
 992 } UNSAFE_END
 993 
 994 UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
 995   Handle p(THREAD, JNIHandles::resolve(obj));
 996   jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);
 997 
 998 #ifdef SUPPORTS_NATIVE_CX8
 999   return (jlong)(Atomic::cmpxchg(x, addr, e));
1000 #else
1001   if (VM_Version::supports_cx8()) {
1002     return (jlong)(Atomic::cmpxchg(x, addr, e));
1003   } else {
1004     MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
1005 
1006     jlong val = Atomic::load(addr);
1007     if (val == e) {
1008       Atomic::store(x, addr);
1009     }
1010     return val;
1011   }
1012 #endif
1013 } UNSAFE_END
1014 
1015 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
1016   oop x = JNIHandles::resolve(x_h);
1017   oop e = JNIHandles::resolve(e_h);
1018   oop p = JNIHandles::resolve(obj);
1019   HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
1020   oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
1021   if (res != e) {
1022     return false;
1023   }
1024 
1025   update_barrier_set((void*)addr, x);
1026 
1027   return true;
1028 } UNSAFE_END
1029 
1030 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
1031   oop p = JNIHandles::resolve(obj);
1032   jint* addr = (jint *)index_oop_from_field_offset_long(p, offset);
1033 
1034   return (jint)(Atomic::cmpxchg(x, addr, e)) == e;
1035 } UNSAFE_END
1036 
1037 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
1038   Handle p(THREAD, JNIHandles::resolve(obj));
1039   jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);
1040 
1041 #ifdef SUPPORTS_NATIVE_CX8
1042   return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
1043 #else
1044   if (VM_Version::supports_cx8()) {
1045     return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
1046   } else {
1047     MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
1048 
1049     jlong val = Atomic::load(addr);
1050     if (val != e) {
1051       return false;
1052     }
1053 
1054     Atomic::store(x, addr);
1055     return true;
1056   }
1057 #endif
1058 } UNSAFE_END
1059 
1060 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) {
1061   EventThreadPark event;
1062   HOTSPOT_THREAD_PARK_BEGIN((uintptr_t) thread->parker(), (int) isAbsolute, time);
1063 
1064   JavaThreadParkedState jtps(thread, time != 0);
1065   thread->parker()->park(isAbsolute != 0, time);
1066 
1067   HOTSPOT_THREAD_PARK_END((uintptr_t) thread->parker());
1068 
1069   if (event.should_commit()) {
1070     oop obj = thread->current_park_blocker();
1071     event.set_parkedClass((obj != NULL) ? obj->klass() : NULL);
1072     event.set_timeout(time);
1073     event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
1074     event.commit();
1075   }
1076 } UNSAFE_END
1077 
1078 UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread)) {

src/share/vm/prims/unsafe.cpp (new version, rev 12906 : [mq]: gc_interface)

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classFileStream.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayOop.inline.hpp"
  31 #include "oops/typeArrayOop.inline.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "prims/jni.h"
  34 #include "prims/jvm.h"
  35 #include "prims/unsafe.hpp"
  36 #include "runtime/access.inline.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/interfaceSupport.hpp"
  40 #include "runtime/orderAccess.inline.hpp"
  41 #include "runtime/reflection.hpp"
  42 #include "runtime/vm_version.hpp"
  43 #include "services/threadService.hpp"
  44 #include "trace/tracing.hpp"
  45 #include "utilities/copy.hpp"
  46 #include "utilities/dtrace.hpp"
  47 #include "utilities/macros.hpp"
  48 
  49 /**
  50  * Implementation of the jdk.internal.misc.Unsafe class
  51  */
  52 
  53 
  54 #define MAX_OBJECT_SIZE \
  55   ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
  56     + ((julong)max_jint * sizeof(double)) )
  57 
  58 
  59 #define UNSAFE_ENTRY(result_type, header) \
  60   JVM_ENTRY(static result_type, header)
  61 
  62 #define UNSAFE_LEAF(result_type, header) \
  63   JVM_LEAF(static result_type, header)
  64 
  65 #define UNSAFE_END JVM_END
  66 
  67 
  80 
  81 
  82 // Note: The VM's obj_field and related accessors use byte-scaled
  83 // ("unscaled") offsets, just as the unsafe methods do.
  84 
  85 // However, the method Unsafe.fieldOffset explicitly declines to
  86 // guarantee this.  The field offset values manipulated by the Java user
  87 // through the Unsafe API are opaque cookies that just happen to be byte
  88 // offsets.  We represent this state of affairs by passing the cookies
  89 // through conversion functions when going between the VM and the Unsafe API.
  90 // The conversion functions just happen to be no-ops at present.
  91 
  92 static inline jlong field_offset_to_byte_offset(jlong field_offset) {
  93   return field_offset;
  94 }
  95 
  96 static inline jlong field_offset_from_byte_offset(jlong byte_offset) {
  97   return byte_offset;
  98 }
  99 
 100 static inline void assert_sane_byte_offset(oop p, jlong byte_offset) {
 101 #ifdef ASSERT
 102   if (p != NULL) {
 103     assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
 104     if (byte_offset == (jint)byte_offset) {
 105       void* ptr_plus_disp = (address)p + byte_offset;
 106       assert((void*)p->obj_field_addr<oop>((jint)byte_offset) == ptr_plus_disp,
 107              "raw [ptr+disp] must be consistent with oop::field_base");
 108     }
 109     jlong p_size = HeapWordSize * (jlong)(p->size());
 110     assert(byte_offset < p_size, "Unsafe access: offset " INT64_FORMAT " > object's size " INT64_FORMAT, byte_offset, p_size);
 111   }
 112 #endif
 113 }
 114 
 115 static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
 116   jlong byte_offset = field_offset_to_byte_offset(field_offset);
 117   assert_sane_byte_offset(p, byte_offset);
 118   if (sizeof(char*) == sizeof(jint)) {   // (this constant folds!)
 119     return (address)p + (jint) byte_offset;
 120   } else {
 121     return (address)p +        byte_offset;
 122   }
 123 }
 124 
 125 // Externally callable versions:
 126 // (Use these in compiler intrinsics which emulate unsafe primitives.)
 127 jlong Unsafe_field_offset_to_byte_offset(jlong field_offset) {
 128   return field_offset;
 129 }
 130 jlong Unsafe_field_offset_from_byte_offset(jlong byte_offset) {
 131   return byte_offset;
 132 }
 133 
 134 
 135 ///// Data read/writes on the Java heap and in native (off-heap) memory
 136 
 137 /**
 138  * Helper class for accessing memory.
 139  *
 140  * Normalizes values and wraps accesses in
 141  * JavaThread::doing_unsafe_access() if needed.
 142  */
 143 class MemoryAccess : StackObj {
 144   JavaThread*const _thread;
 145   const oop _obj;
 146   const ptrdiff_t _offset;
 147 
 148   template <typename T>
 149   T normalize_for_write(T x) const {
 150     return x;
 151   }
 152 
 153   jboolean normalize_for_write(jboolean x) const {
 154     return x & 1;
 155   }
 156 
 157   template <typename T>
 158   T normalize_for_read(T x) const {
 159     return x;
 160   }
 161 
 162   jboolean normalize_for_read(jboolean x) const {
 163     return x != 0;
 164   }
 165 
 166   /**
 167    * Helper class to wrap memory accesses in JavaThread::doing_unsafe_access()
 168    * native/off-heap access which may raise SIGBUS if accessing
 169    * memory mapped file data in a region of the file which has
 170    * been truncated and is now invalid
 171    */
 172   class GuardUnsafeAccess {
 173     JavaThread*const _thread;
 174 
 175   public:
 176     GuardUnsafeAccess(JavaThread* thread) : _thread(thread) {
 177       _thread->set_doing_unsafe_access(true);
 178     }
 179 
 180     ~GuardUnsafeAccess() {
 181       _thread->set_doing_unsafe_access(false);
 182     }
 183   };
 184 
 185 public:
 186   MemoryAccess(JavaThread* thread, jobject obj, jlong offset)
 187     : _thread(thread), _obj(JNIHandles::resolve(obj)), _offset((ptrdiff_t)offset) {
 188     assert_sane_byte_offset(_obj, offset);
 189   }
 190 
 191   bool on_heap() const {
 192     return !oopDesc::is_null(_obj);
 193   }
 194 
 195   template <typename T>
 196   T get() const {
 197     T x;
 198     if (on_heap()) {
 199       x = HeapAccess<ACCESS_ON_ANONYMOUS>::load_at(_obj, _offset);
 200     } else {
 201       GuardUnsafeAccess guard(_thread);
 202       x = RawAccess<>::load((T*)_offset);
 203     }
 204     return normalize_for_read(x);
 205   }
 206 
 207   template <typename T>
 208   void put(T x) {
 209     x = normalize_for_write(x);
 210     if (on_heap()) {
 211       HeapAccess<ACCESS_ON_ANONYMOUS>::store_at(_obj, _offset, x);
 212     } else {
 213       GuardUnsafeAccess guard(_thread);
 214       RawAccess<>::store((T*)_offset, x);
 215     }
 216   }
 217 
 218 
 219   template <typename T>
 220   T get_volatile() {
 221     T x;
 222     if (on_heap()) {
 223       x = HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::load_at(_obj, _offset);
 224     } else {
 225       GuardUnsafeAccess guard(_thread);
 226       x = RawAccess<MO_SEQ_CST>::load((T*)_offset);
 227     }
 228     return normalize_for_read(x);
 229   }
 230 
 231   template <typename T>
 232   void put_volatile(T x) {
 233     x = normalize_for_write(x);
 234     if (on_heap()) {
 235       HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::store_at(_obj, _offset, x);
 236     } else {
 237       GuardUnsafeAccess guard(_thread);
 238       RawAccess<MO_SEQ_CST>::store((T*)_offset, x);
 239     }
 240   }
 241 };
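// How to read the Access API calls above (decorators as named in this patch):
//
//   HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::load_at(_obj, _offset)
//
// is a heap access at an offset into an object whose field is not statically
// known ("anonymous"), with sequentially consistent ordering; the active GC's
// barrier set supplies any pre/post barriers. By contrast,
//
//   RawAccess<>::load((T*)_offset)
//
// is a bare access with no barriers, used for native (off-heap) memory.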
 242 
 243 // These functions allow a null base pointer with an arbitrary address.
 244 // But if the base pointer is non-null, the offset should make some sense.
 245 // That is, it should be in the range [0, MAX_OBJECT_SIZE].
 246 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
 247   oop p = JNIHandles::resolve(obj);
 248   assert_sane_byte_offset(p, offset);
 249   oop v = HeapAccess<ACCESS_ON_ANONYMOUS>::oop_load_at(p, offset);
 250   return JNIHandles::make_local(env, v);
 251 } UNSAFE_END
 252 
 253 UNSAFE_ENTRY(void, Unsafe_PutObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
 254   oop x = JNIHandles::resolve(x_h);
 255   oop p = JNIHandles::resolve(obj);
 256   assert_sane_byte_offset(p, offset);
 257   HeapAccess<ACCESS_ON_ANONYMOUS>::oop_store_at(p, offset, x);
 258 } UNSAFE_END
 259 
 260 UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
 261   oop p = JNIHandles::resolve(obj);
 262   assert_sane_byte_offset(p, offset);
 263   oop v = HeapAccess<MO_SEQ_CST | ACCESS_ON_ANONYMOUS>::oop_load_at(p, offset);
 264   return JNIHandles::make_local(env, v);
 265 } UNSAFE_END
 266 
 267 UNSAFE_ENTRY(void, Unsafe_PutObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
 268   oop x = JNIHandles::resolve(x_h);
 269   oop p = JNIHandles::resolve(obj);
 270   assert_sane_byte_offset(p, offset);
 271   HeapAccess<MO_SEQ_CST | ACCESS_ON_ANONYMOUS>::oop_store_at(p, offset, x);
 272 } UNSAFE_END
 273 
 274 UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe, jlong addr)) {
 275   oop v = *(oop*) (address) addr;
 276   return JNIHandles::make_local(env, v);
 277 } UNSAFE_END
 278 
 279 UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
 280 #ifdef VM_LITTLE_ENDIAN
 281   return false;
 282 #else
 283   return true;
 284 #endif
 285 } UNSAFE_END
 286 
 287 UNSAFE_LEAF(jint, Unsafe_unalignedAccess0(JNIEnv *env, jobject unsafe)) {
 288   return UseUnalignedAccesses;
 289 } UNSAFE_END
 290 
 291 #define DEFINE_GETSETOOP(java_type, Type) \
 292  \
 293 UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
 294   return MemoryAccess(thread, obj, offset).get<java_type>(); \
 295 } UNSAFE_END \
 296  \
 297 UNSAFE_ENTRY(void, Unsafe_Put##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
 298   MemoryAccess(thread, obj, offset).put<java_type>(x); \
 311 
 312 #undef DEFINE_GETSETOOP
 313 
 314 #define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
 315  \
 316 UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
 317   return MemoryAccess(thread, obj, offset).get_volatile<java_type>(); \
 318 } UNSAFE_END \
 319  \
 320 UNSAFE_ENTRY(void, Unsafe_Put##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
 321   MemoryAccess(thread, obj, offset).put_volatile<java_type>(x); \
 322 } UNSAFE_END \
 323  \
 324 // END DEFINE_GETSETOOP_VOLATILE.
 325 
 326 DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean)
 327 DEFINE_GETSETOOP_VOLATILE(jbyte, Byte)
 328 DEFINE_GETSETOOP_VOLATILE(jshort, Short);
 329 DEFINE_GETSETOOP_VOLATILE(jchar, Char);
 330 DEFINE_GETSETOOP_VOLATILE(jint, Int);
 331 DEFINE_GETSETOOP_VOLATILE(jlong, Long);
 332 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 333 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);
 334 
 335 #undef DEFINE_GETSETOOP_VOLATILE
 336 
 337 UNSAFE_LEAF(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe)) {
 338   OrderAccess::acquire();
 339 } UNSAFE_END
 340 
 341 UNSAFE_LEAF(void, Unsafe_StoreFence(JNIEnv *env, jobject unsafe)) {
 342   OrderAccess::release();
 343 } UNSAFE_END
 344 
 345 UNSAFE_LEAF(void, Unsafe_FullFence(JNIEnv *env, jobject unsafe)) {
 346   OrderAccess::fence();
 347 } UNSAFE_END
 348 
 349 ////// Allocation requests
 350 
 351 UNSAFE_ENTRY(jobject, Unsafe_AllocateInstance(JNIEnv *env, jobject unsafe, jclass cls)) {
 352   ThreadToNativeFromVM ttnfv(thread);
 353   return env->AllocObject(cls);
 354 } UNSAFE_END


 814   }
 815 
 816   // let caller initialize it as needed...
 817 
 818   return (jclass) res_jh;
 819 } UNSAFE_END
 820 
 821 
 822 
 823 UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr)) {
 824   ThreadToNativeFromVM ttnfv(thread);
 825   env->Throw(thr);
 826 } UNSAFE_END
 827 
 828 // JSR166 ------------------------------------------------------------------
 829 
 830 UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
 831   oop x = JNIHandles::resolve(x_h);
 832   oop e = JNIHandles::resolve(e_h);
 833   oop p = JNIHandles::resolve(obj);
 834   assert_sane_byte_offset(p, offset);
 835   oop res = HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::oop_cas_at(x, p, (ptrdiff_t)offset, e);
 836   return JNIHandles::make_local(env, res);
 837 } UNSAFE_END
 838 
 839 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
 840   oop x = JNIHandles::resolve(x_h);
 841   oop e = JNIHandles::resolve(e_h);
 842   oop p = JNIHandles::resolve(obj);
 843   assert_sane_byte_offset(p, offset);
 844   oop res = HeapAccess<ACCESS_ON_ANONYMOUS | MO_SEQ_CST>::oop_cas_at(x, p, (ptrdiff_t)offset, e);
 845   return res == e;
 846 } UNSAFE_END
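// Note (per this patch's API, as observed from the diff): oop_cas_at returns
// the value witnessed at the location, so the swap succeeded exactly when the
// result equals e. The barrier work that update_barrier_set() did in the old
// version is now folded into the HeapAccess implementation.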
 847 
 848 #define DEFINE_ATOMICOOP(java_type, Type) \
 849  \
 850 UNSAFE_ENTRY(java_type, Unsafe_CompareAndExchange##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type e, java_type x)) { \
 851   Handle p(THREAD, JNIHandles::resolve(obj)); \
 852   assert_sane_byte_offset(p(), offset); \
 853   return HeapAccess<ACCESS_ON_ANONYMOUS>::cas_at(x, p(), (ptrdiff_t)offset, e); \
 854 } UNSAFE_END \
 855  \
 856 UNSAFE_ENTRY(java_type, Unsafe_CompareAndSwap##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type e, java_type x)) { \
 857   Handle p(THREAD, JNIHandles::resolve(obj)); \
 858   assert_sane_byte_offset(p(), offset); \
 859   return HeapAccess<ACCESS_ON_ANONYMOUS>::cas_at(x, p(), (ptrdiff_t)offset, e) == e; \
 860 } UNSAFE_END \
 861  \
 862 // END DEFINE_ATOMICOOP.
 863 
 864 DEFINE_ATOMICOOP(jint, Int);
 865 DEFINE_ATOMICOOP(jlong, Long);
 866 
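// Illustrative expansion of DEFINE_ATOMICOOP(jint, Int), first half:
//
//   UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe,
//                                                   jobject obj, jlong offset,
//                                                   jint e, jint x)) {
//     Handle p(THREAD, JNIHandles::resolve(obj));
//     assert_sane_byte_offset(p(), offset);
//     return HeapAccess<ACCESS_ON_ANONYMOUS>::cas_at(x, p(), (ptrdiff_t)offset, e);
//   } UNSAFE_END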
 867 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) {
 868   EventThreadPark event;
 869   HOTSPOT_THREAD_PARK_BEGIN((uintptr_t) thread->parker(), (int) isAbsolute, time);
 870 
 871   JavaThreadParkedState jtps(thread, time != 0);
 872   thread->parker()->park(isAbsolute != 0, time);
 873 
 874   HOTSPOT_THREAD_PARK_END((uintptr_t) thread->parker());
 875 
 876   if (event.should_commit()) {
 877     oop obj = thread->current_park_blocker();
 878     event.set_parkedClass((obj != NULL) ? obj->klass() : NULL);
 879     event.set_timeout(time);
 880     event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
 881     event.commit();
 882   }
 883 } UNSAFE_END
 884 
 885 UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread)) {

