47 #endif // INCLUDE_ALL_GCS
48
49 /**
50 * Implementation of the jdk.internal.misc.Unsafe class
51 */
52
53
// Largest conceivable object: a max-jint-length double array plus its header.
// Used to sanity-check field offsets against a non-null base object.
#define MAX_OBJECT_SIZE \
  ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
    + ((julong)max_jint * sizeof(double)) )


// All Unsafe natives are static functions wrapped in the standard JVM
// entry macros: UNSAFE_ENTRY performs the native->VM thread-state
// transition, UNSAFE_LEAF does not (and so must not touch the heap).
#define UNSAFE_ENTRY(result_type, header) \
  JVM_ENTRY(static result_type, header)

#define UNSAFE_LEAF(result_type, header) \
  JVM_LEAF(static result_type, header)

#define UNSAFE_END JVM_END

// Placeholder hook named in every Unsafe native; currently expands to nothing.
#define UnsafeWrapper(arg) /*nothing, for the present*/
68
69
70 static inline void* addr_from_java(jlong addr) {
71 // This assert fails in a variety of ways on 32-bit systems.
72 // It is impossible to predict whether native code that converts
73 // pointers to longs will sign-extend or zero-extend the addresses.
74 //assert(addr == (uintptr_t)addr, "must not be odd high bits");
75 return (void*)(uintptr_t)addr;
76 }
77
78 static inline jlong addr_to_java(void* p) {
79 assert(p == (void*)(uintptr_t)p, "must not be odd high bits");
80 return (uintptr_t)p;
81 }
82
83
84 // Note: The VM's obj_field and related accessors use byte-scaled
85 // ("unscaled") offsets, just as the unsafe methods do.
86
87 // However, the method Unsafe.fieldOffset explicitly declines to
88 // guarantee this. The field offset values manipulated by the Java user
143 *(type_name*)index_oop_from_field_offset_long(p, offset) = x
144
// Volatile load of a field: resolve the base handle, emit a full fence
// first on CPUs that are not multiple-copy atomic (the IRIW case), then
// perform a load-acquire and bind the result to local 'v'.
#define GET_FIELD_VOLATILE(obj, offset, type_name, v) \
  oop p = JNIHandles::resolve(obj); \
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) { \
    OrderAccess::fence(); \
  } \
  volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));

// Volatile store of a field: release-store the value and follow with a
// full fence, giving the store sequentially-consistent visibility.
#define SET_FIELD_VOLATILE(obj, offset, type_name, x) \
  oop p = JNIHandles::resolve(obj); \
  OrderAccess::release_store_fence((volatile type_name*)index_oop_from_field_offset_long(p, offset), x);
155
156
157 // Get/SetObject must be special-cased, since it works with handles.
158
159 // These functions allow a null base pointer with an arbitrary address.
160 // But if the base pointer is non-null, the offset should make some sense.
161 // That is, it should be in the range [0, MAX_OBJECT_SIZE].
162 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
163 UnsafeWrapper("Unsafe_GetObject");
164
165 oop p = JNIHandles::resolve(obj);
166 oop v;
167
168 if (UseCompressedOops) {
169 narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset);
170 v = oopDesc::decode_heap_oop(n);
171 } else {
172 v = *(oop*)index_oop_from_field_offset_long(p, offset);
173 }
174
175 jobject ret = JNIHandles::make_local(env, v);
176
177 #if INCLUDE_ALL_GCS
178 // We could be accessing the referent field in a reference
179 // object. If G1 is enabled then we need to register non-null
180 // referent with the SATB barrier.
181 if (UseG1GC) {
182 bool needs_barrier = false;
183
184 if (ret != NULL) {
186 oop o = JNIHandles::resolve(obj);
187 Klass* k = o->klass();
188 if (InstanceKlass::cast(k)->reference_type() != REF_NONE) {
189 assert(InstanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
190 needs_barrier = true;
191 }
192 }
193 }
194
195 if (needs_barrier) {
196 oop referent = JNIHandles::resolve(ret);
197 G1SATBCardTableModRefBS::enqueue(referent);
198 }
199 }
200 #endif // INCLUDE_ALL_GCS
201
202 return ret;
203 } UNSAFE_END
204
205 UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
206 UnsafeWrapper("Unsafe_SetObject");
207
208 oop x = JNIHandles::resolve(x_h);
209 oop p = JNIHandles::resolve(obj);
210
211 if (UseCompressedOops) {
212 oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
213 } else {
214 oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
215 }
216 } UNSAFE_END
217
// Volatile read of an object reference: plain (possibly narrow) load
// through a volatile lvalue, followed by an acquire barrier before the
// result is published as a local handle.
UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
  UnsafeWrapper("Unsafe_GetObjectVolatile");

  oop p = JNIHandles::resolve(obj);
  void* addr = index_oop_from_field_offset_long(p, offset);

  volatile oop v;

  if (UseCompressedOops) {
    volatile narrowOop n = *(volatile narrowOop*) addr;
    // The (void)const_cast<oop&>(...) wrapper silences warnings when
    // assigning through the volatile local while keeping the store.
    (void)const_cast<oop&>(v = oopDesc::decode_heap_oop(n));
  } else {
    (void)const_cast<oop&>(v = *(volatile oop*) addr);
  }

  OrderAccess::acquire();
  return JNIHandles::make_local(env, v);
} UNSAFE_END
236
// Volatile store of an object reference: release barrier before the
// barriered oop_store, full fence after, mirroring SET_FIELD_VOLATILE.
UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
  UnsafeWrapper("Unsafe_SetObjectVolatile");

  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  void* addr = index_oop_from_field_offset_long(p, offset);
  OrderAccess::release();

  if (UseCompressedOops) {
    oop_store((narrowOop*)addr, x);
  } else {
    oop_store((oop*)addr, x);
  }

  OrderAccess::fence();
} UNSAFE_END
253
// Read a full-width (never compressed) oop from a raw native address and
// return it as a local handle. No checks: a bad address will crash.
UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe, jlong addr)) {
  UnsafeWrapper("Unsafe_GetUncompressedObject");

  oop v = *(oop*) (address) addr;

  return JNIHandles::make_local(env, v);
} UNSAFE_END
261
// Map a raw metaspace Klass* (passed as a jlong) to its java.lang.Class
// mirror. The caller is trusted to pass a valid Klass pointer.
UNSAFE_ENTRY(jclass, Unsafe_GetJavaMirror(JNIEnv *env, jobject unsafe, jlong metaspace_klass)) {
  UnsafeWrapper("Unsafe_GetJavaMirror");

  Klass* klass = (Klass*) (address) metaspace_klass;

  return (jclass) JNIHandles::make_local(klass->java_mirror());
} UNSAFE_END
269
// Return the raw Klass* of an object as a jlong (inverse of GetJavaMirror).
// NOTE(review): obj is dereferenced without a null check — a null argument
// crashes here; presumably callers guarantee non-null. Verify at call sites.
UNSAFE_ENTRY(jlong, Unsafe_GetKlassPointer(JNIEnv *env, jobject unsafe, jobject obj)) {
  UnsafeWrapper("Unsafe_GetKlassPointer");

  oop o = JNIHandles::resolve(obj);
  jlong klass = (jlong) (address) o->klass();

  return klass;
} UNSAFE_END
278
279 #ifndef SUPPORTS_NATIVE_CX8
280
281 // VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
282 //
283 // On platforms which do not support atomic compare-and-swap of jlong (8 byte)
284 // values we have to use a lock-based scheme to enforce atomicity. This has to be
285 // applied to all Unsafe operations that set the value of a jlong field. Even so
286 // the compareAndSwapLong operation will not be atomic with respect to direct stores
287 // to the field from Java code. It is important therefore that any Java code that
288 // utilizes these Unsafe jlong operations does not perform direct stores. To permit
289 // direct loads of the field from Java code we must also use Atomic::store within the
290 // locked regions. And for good measure, in case there are direct stores, we also
291 // employ Atomic::load within those regions. Note that the field in question must be
292 // volatile and so must have atomic load/store accesses applied at the Java level.
293 //
294 // The locking scheme could utilize a range of strategies for controlling the locking
295 // granularity: from a lock per-field through to a single global lock. The latter is
296 // the simplest and is used for the current implementation. Note that the Java object
297 // that contains the field, can not, in general, be used for locking. To do so can lead
298 // to deadlocks as we may introduce locking into what appears to the Java code to be a
299 // lock-free path.
300 //
301 // As all the locked-regions are very short and themselves non-blocking we can treat
302 // them as leaf routines and elide safepoint checks (ie we don't perform any thread
303 // state transitions even when blocking for the lock). Note that if we do choose to
304 // add safepoint checks and thread state transitions, we must ensure that we calculate
305 // the address of the field _after_ we have acquired the lock, else the object may have
306 // been moved by the GC
307
// Volatile read of a jlong field on platforms that may lack 8-byte atomics.
// With cx8 support the normal load-acquire path is used; otherwise the read
// happens under the global UnsafeJlong_lock (no safepoint check, so the
// address computed before the lock cannot be invalidated by a GC move —
// see the locking discussion above).
UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
  UnsafeWrapper("Unsafe_GetLongVolatile");

  if (VM_Version::supports_cx8()) {
    GET_FIELD_VOLATILE(obj, offset, jlong, v);
    return v;
  } else {
    Handle p (THREAD, JNIHandles::resolve(obj));
    jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
    jlong value = Atomic::load(addr);
    return value;
  }
} UNSAFE_END
322
// Volatile write of a jlong field; lock-based fallback mirrors
// Unsafe_GetLongVolatile when 8-byte atomics are unavailable.
UNSAFE_ENTRY(void, Unsafe_SetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
  UnsafeWrapper("Unsafe_SetLongVolatile");

  if (VM_Version::supports_cx8()) {
    SET_FIELD_VOLATILE(obj, offset, jlong, x);
  } else {
    Handle p (THREAD, JNIHandles::resolve(obj));
    jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
    Atomic::store(x, addr);
  }
} UNSAFE_END
335
336 #endif // not SUPPORTS_NATIVE_CX8
337
338 UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
339 UnsafeWrapper("Unsafe_IsBigEndian0");
340
341 #ifdef VM_LITTLE_ENDIAN
342 return false;
343 #else
344 return true;
345 #endif
346 } UNSAFE_END
347
// Report whether the platform tolerates unaligned memory accesses
// (value of the UseUnalignedAccesses VM flag).
UNSAFE_LEAF(jint, Unsafe_unalignedAccess0(JNIEnv *env, jobject unsafe)) {
  UnsafeWrapper("Unsafe_UnalignedAccess0");

  return UseUnalignedAccesses;
} UNSAFE_END
353
// Stamp out a plain (non-volatile) Get<Type>/Set<Type> accessor pair for
// one Java primitive type, built on GET_FIELD/SET_FIELD.
#define DEFINE_GETSETOOP(java_type, Type) \
 \
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
  UnsafeWrapper("Unsafe_Get"#Type); \
  GET_FIELD(obj, offset, java_type, v); \
  return v; \
} UNSAFE_END \
 \
UNSAFE_ENTRY(void, Unsafe_Set##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
  UnsafeWrapper("Unsafe_Set"#Type); \
  SET_FIELD(obj, offset, java_type, x); \
} UNSAFE_END \
 \
// END DEFINE_GETSETOOP.
368
// Instantiate the plain accessors for every Java primitive type.
DEFINE_GETSETOOP(jboolean, Boolean)
DEFINE_GETSETOOP(jbyte, Byte)
DEFINE_GETSETOOP(jshort, Short);
DEFINE_GETSETOOP(jchar, Char);
DEFINE_GETSETOOP(jint, Int);
DEFINE_GETSETOOP(jlong, Long);
DEFINE_GETSETOOP(jfloat, Float);
DEFINE_GETSETOOP(jdouble, Double);

#undef DEFINE_GETSETOOP
379
// Stamp out the volatile Get<Type>Volatile/Set<Type>Volatile accessor pair
// for one Java primitive type, built on the *_FIELD_VOLATILE macros.
#define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
 \
UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
  UnsafeWrapper("Unsafe_Get"#Type); \
  GET_FIELD_VOLATILE(obj, offset, java_type, v); \
  return v; \
} UNSAFE_END \
 \
UNSAFE_ENTRY(void, Unsafe_Set##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
  UnsafeWrapper("Unsafe_Set"#Type); \
  SET_FIELD_VOLATILE(obj, offset, java_type, x); \
} UNSAFE_END \
 \
// END DEFINE_GETSETOOP_VOLATILE.
394
// Instantiate the volatile accessors. Long is only generated here when the
// platform has native 8-byte atomics; otherwise the hand-written
// Unsafe_Get/SetLongVolatile above (with the lock fallback) is used.
DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean)
DEFINE_GETSETOOP_VOLATILE(jbyte, Byte)
DEFINE_GETSETOOP_VOLATILE(jshort, Short);
DEFINE_GETSETOOP_VOLATILE(jchar, Char);
DEFINE_GETSETOOP_VOLATILE(jint, Int);
DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
DEFINE_GETSETOOP_VOLATILE(jdouble, Double);

#ifdef SUPPORTS_NATIVE_CX8
DEFINE_GETSETOOP_VOLATILE(jlong, Long);
#endif

#undef DEFINE_GETSETOOP_VOLATILE
408
409 // The non-intrinsified versions of setOrdered just use setVolatile
410
// Non-intrinsified putOrderedInt: simply performs a volatile store.
UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) {
  UnsafeWrapper("Unsafe_SetOrderedInt");

  SET_FIELD_VOLATILE(obj, offset, jint, x);
} UNSAFE_END
416
// Non-intrinsified putOrderedObject: identical to SetObjectVolatile
// (release, barriered store, full fence).
UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
  UnsafeWrapper("Unsafe_SetOrderedObject");

  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  void* addr = index_oop_from_field_offset_long(p, offset);
  OrderAccess::release();

  if (UseCompressedOops) {
    oop_store((narrowOop*)addr, x);
  } else {
    oop_store((oop*)addr, x);
  }

  OrderAccess::fence();
} UNSAFE_END
433
// Non-intrinsified putOrderedLong: volatile store, with the locked
// fallback for platforms without atomic 8-byte stores.
UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
  UnsafeWrapper("Unsafe_SetOrderedLong");

#ifdef SUPPORTS_NATIVE_CX8
  SET_FIELD_VOLATILE(obj, offset, jlong, x);
#else

  // Keep old code for platforms which may not have atomic long (8 bytes) instructions
  if (VM_Version::supports_cx8()) {
    SET_FIELD_VOLATILE(obj, offset, jlong, x);
  } else {
    Handle p(THREAD, JNIHandles::resolve(obj));
    jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
    Atomic::store(x, addr);
  }
#endif
} UNSAFE_END
452
// loadFence(): acquire barrier — orders prior loads before subsequent ops.
UNSAFE_LEAF(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe)) {
  UnsafeWrapper("Unsafe_LoadFence");

  OrderAccess::acquire();
} UNSAFE_END
458
// storeFence(): release barrier — orders prior stores before subsequent ops.
UNSAFE_LEAF(void, Unsafe_StoreFence(JNIEnv *env, jobject unsafe)) {
  UnsafeWrapper("Unsafe_StoreFence");

  OrderAccess::release();
} UNSAFE_END
464
// fullFence(): bidirectional full memory barrier.
UNSAFE_LEAF(void, Unsafe_FullFence(JNIEnv *env, jobject unsafe)) {
  UnsafeWrapper("Unsafe_FullFence");

  OrderAccess::fence();
} UNSAFE_END
470
471 ////// Data in the C heap.
472
473 // Note: These do not throw NullPointerException for bad pointers.
474 // They just crash. Only a oop base pointer can generate a NullPointerException.
475 //
// Stamp out GetNative<Type>/SetNative<Type> for raw (off-heap) addresses.
// The doing_unsafe_access flag brackets the access so a fault there can be
// recognized and turned into a Java error rather than a VM crash.
#define DEFINE_GETSETNATIVE(java_type, Type, native_type) \
 \
UNSAFE_ENTRY(java_type, Unsafe_GetNative##Type(JNIEnv *env, jobject unsafe, jlong addr)) { \
  UnsafeWrapper("Unsafe_GetNative"#Type); \
  void* p = addr_from_java(addr); \
  JavaThread* t = JavaThread::current(); \
  t->set_doing_unsafe_access(true); \
  java_type x = *(volatile native_type*)p; \
  t->set_doing_unsafe_access(false); \
  return x; \
} UNSAFE_END \
 \
UNSAFE_ENTRY(void, Unsafe_SetNative##Type(JNIEnv *env, jobject unsafe, jlong addr, java_type x)) { \
  UnsafeWrapper("Unsafe_SetNative"#Type); \
  JavaThread* t = JavaThread::current(); \
  t->set_doing_unsafe_access(true); \
  void* p = addr_from_java(addr); \
  *(volatile native_type*)p = x; \
  t->set_doing_unsafe_access(false); \
} UNSAFE_END \
 \
// END DEFINE_GETSETNATIVE.
498
// Instantiate the native-memory accessors for each primitive except long,
// which needs special unaligned handling (see Unsafe_Get/SetNativeLong).
DEFINE_GETSETNATIVE(jbyte, Byte, signed char)
DEFINE_GETSETNATIVE(jshort, Short, signed short);
DEFINE_GETSETNATIVE(jchar, Char, unsigned short);
DEFINE_GETSETNATIVE(jint, Int, jint);
// no long -- handled specially
DEFINE_GETSETNATIVE(jfloat, Float, float);
DEFINE_GETSETNATIVE(jdouble, Double, double);

#undef DEFINE_GETSETNATIVE
508
509 UNSAFE_ENTRY(jlong, Unsafe_GetNativeLong(JNIEnv *env, jobject unsafe, jlong addr)) {
510 UnsafeWrapper("Unsafe_GetNativeLong");
511
512 JavaThread* t = JavaThread::current();
513 // We do it this way to avoid problems with access to heap using 64
514 // bit loads, as jlong in heap could be not 64-bit aligned, and on
515 // some CPUs (SPARC) it leads to SIGBUS.
516 t->set_doing_unsafe_access(true);
517 void* p = addr_from_java(addr);
518 jlong x;
519
520 if (is_ptr_aligned(p, sizeof(jlong)) == 0) {
521 // jlong is aligned, do a volatile access
522 x = *(volatile jlong*)p;
523 } else {
524 jlong_accessor acc;
525 acc.words[0] = ((volatile jint*)p)[0];
526 acc.words[1] = ((volatile jint*)p)[1];
527 x = acc.long_value;
528 }
529
530 t->set_doing_unsafe_access(false);
531
532 return x;
533 } UNSAFE_END
534
// Store a jlong to a raw native address, tolerating unaligned addresses
// by splitting the store into two 32-bit halves.
UNSAFE_ENTRY(void, Unsafe_SetNativeLong(JNIEnv *env, jobject unsafe, jlong addr, jlong x)) {
  UnsafeWrapper("Unsafe_SetNativeLong");

  JavaThread* t = JavaThread::current();
  // see comment for Unsafe_GetNativeLong
  t->set_doing_unsafe_access(true);
  void* p = addr_from_java(addr);

  if (is_ptr_aligned(p, sizeof(jlong))) {
    // jlong is aligned, do a volatile access
    *(volatile jlong*)p = x;
  } else {
    // Unaligned: write the two 32-bit halves separately.
    jlong_accessor acc;
    acc.long_value = x;
    ((volatile jint*)p)[0] = acc.words[0];
    ((volatile jint*)p)[1] = acc.words[1];
  }

  t->set_doing_unsafe_access(false);
} UNSAFE_END
555
556
// Read a native pointer stored at 'addr' and return it as a jlong.
UNSAFE_LEAF(jlong, Unsafe_GetNativeAddress(JNIEnv *env, jobject unsafe, jlong addr)) {
  UnsafeWrapper("Unsafe_GetNativeAddress");

  void* p = addr_from_java(addr);

  return addr_to_java(*(void**)p);
} UNSAFE_END
564
// Store the pointer value 'x' into the native slot at 'addr'.
UNSAFE_LEAF(void, Unsafe_SetNativeAddress(JNIEnv *env, jobject unsafe, jlong addr, jlong x)) {
  UnsafeWrapper("Unsafe_SetNativeAddress");

  void* p = addr_from_java(addr);
  *(void**)p = addr_from_java(x);
} UNSAFE_END
571
572
573 ////// Allocation requests
574
// Allocate an instance of 'cls' without running any constructor, by
// delegating to JNI AllocObject (which requires the native thread state,
// hence the ThreadToNativeFromVM transition).
UNSAFE_ENTRY(jobject, Unsafe_AllocateInstance(JNIEnv *env, jobject unsafe, jclass cls)) {
  UnsafeWrapper("Unsafe_AllocateInstance");

  {
    ThreadToNativeFromVM ttnfv(thread);
    return env->AllocObject(cls);
  }
} UNSAFE_END
583
// Malloc 'size' bytes (rounded up to a HeapWord multiple) from the C heap.
// Size validation happens on the Java side; a failed os::malloc yields
// NULL, which addr_to_java maps to 0.
UNSAFE_ENTRY(jlong, Unsafe_AllocateMemory0(JNIEnv *env, jobject unsafe, jlong size)) {
  UnsafeWrapper("Unsafe_AllocateMemory");

  size_t sz = (size_t)size;

  sz = round_to(sz, HeapWordSize);
  void* x = os::malloc(sz, mtInternal);

  return addr_to_java(x);
} UNSAFE_END
594
// Realloc the C-heap block at 'addr' to 'size' bytes (rounded up to a
// HeapWord multiple); returns the (possibly moved) address, 0 on failure.
UNSAFE_ENTRY(jlong, Unsafe_ReallocateMemory0(JNIEnv *env, jobject unsafe, jlong addr, jlong size)) {
  UnsafeWrapper("Unsafe_ReallocateMemory0");

  void* p = addr_from_java(addr);
  size_t sz = (size_t)size;
  sz = round_to(sz, HeapWordSize);

  void* x = os::realloc(p, sz, mtInternal);

  return addr_to_java(x);
} UNSAFE_END
606
// Free a C-heap block previously returned by Allocate/ReallocateMemory0.
UNSAFE_ENTRY(void, Unsafe_FreeMemory0(JNIEnv *env, jobject unsafe, jlong addr)) {
  UnsafeWrapper("Unsafe_FreeMemory0");

  void* p = addr_from_java(addr);

  os::free(p);
} UNSAFE_END
614
// Fill 'size' bytes at (obj, offset) with 'value'; obj may be NULL, in
// which case offset is an absolute native address. The fill is done with
// atomic (non-tearing) element stores.
UNSAFE_ENTRY(void, Unsafe_SetMemory0(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong size, jbyte value)) {
  UnsafeWrapper("Unsafe_SetMemory0");

  size_t sz = (size_t)size;

  oop base = JNIHandles::resolve(obj);
  void* p = index_oop_from_field_offset_long(base, offset);

  Copy::fill_to_memory_atomic(p, sz, value);
} UNSAFE_END
625
// Copy 'size' bytes between (srcObj, srcOffset) and (dstObj, dstOffset);
// either base may be NULL for a raw native address. conjoint_memory_atomic
// handles overlapping regions without tearing elements.
UNSAFE_ENTRY(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size)) {
  UnsafeWrapper("Unsafe_CopyMemory0");

  size_t sz = (size_t)size;

  oop srcp = JNIHandles::resolve(srcObj);
  oop dstp = JNIHandles::resolve(dstObj);

  void* src = index_oop_from_field_offset_long(srcp, srcOffset);
  void* dst = index_oop_from_field_offset_long(dstp, dstOffset);

  Copy::conjoint_memory_atomic(src, dst, sz);
} UNSAFE_END
639
640 // This function is a leaf since if the source and destination are both in native memory
641 // the copy may potentially be very large, and we don't want to disable GC if we can avoid it.
642 // If either source or destination (or both) are on the heap, the function will enter VM using
643 // JVM_ENTRY_FROM_LEAF
// This function is a leaf since if the source and destination are both in native memory
// the copy may potentially be very large, and we don't want to disable GC if we can avoid it.
// If either source or destination (or both) are on the heap, the function will enter VM using
// JVM_ENTRY_FROM_LEAF
UNSAFE_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size, jlong elemSize)) {
  UnsafeWrapper("Unsafe_CopySwapMemory0");

  size_t sz = (size_t)size;
  size_t esz = (size_t)elemSize;

  if (srcObj == NULL && dstObj == NULL) {
    // Both src & dst are in native memory
    address src = (address)srcOffset;
    address dst = (address)dstOffset;

    Copy::conjoint_swap(src, dst, sz, esz);
  } else {
    // At least one of src/dst are on heap, transition to VM to access raw pointers

    JVM_ENTRY_FROM_LEAF(env, void, Unsafe_CopySwapMemory0) {
      oop srcp = JNIHandles::resolve(srcObj);
      oop dstp = JNIHandles::resolve(dstObj);

      address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
      address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);

      // Copy sz bytes, byte-swapping each esz-sized element as it goes.
      Copy::conjoint_swap(src, dst, sz, esz);
    } JVM_END
  }
} UNSAFE_END
670
671 ////// Random queries
672
// Report the native pointer size in bytes (4 or 8).
UNSAFE_LEAF(jint, Unsafe_AddressSize0(JNIEnv *env, jobject unsafe)) {
  UnsafeWrapper("Unsafe_AddressSize");

  return sizeof(void*);
} UNSAFE_END
678
// Report the OS virtual-memory page size in bytes.
UNSAFE_LEAF(jint, Unsafe_PageSize()) {
  UnsafeWrapper("Unsafe_PageSize");

  return os::vm_page_size();
} UNSAFE_END
684
// Shared helper for objectFieldOffset0/staticFieldOffset0: compute the
// offset cookie for a java.lang.reflect.Field. must_be_static is 1 to
// require a static field, 0 to require an instance field, negative to
// accept either; a mismatch throws IllegalArgumentException.
static jint find_field_offset(jobject field, int must_be_static, TRAPS) {
  assert(field != NULL, "field must not be NULL");

  oop reflected = JNIHandles::resolve_non_null(field);
  oop mirror = java_lang_reflect_Field::clazz(reflected);
  Klass* k = java_lang_Class::as_Klass(mirror);
  int slot = java_lang_reflect_Field::slot(reflected);
  int modifiers = java_lang_reflect_Field::modifiers(reflected);

  if (must_be_static >= 0) {
    int really_is_static = ((modifiers & JVM_ACC_STATIC) != 0);
    if (must_be_static != really_is_static) {
      THROW_0(vmSymbols::java_lang_IllegalArgumentException());
    }
  }

  // Translate the internal byte offset to the external cookie form.
  int offset = InstanceKlass::cast(k)->field_offset(slot);
  return field_offset_from_byte_offset(offset);
}
704
// Offset cookie for an instance field (throws if the field is static).
UNSAFE_ENTRY(jlong, Unsafe_ObjectFieldOffset0(JNIEnv *env, jobject unsafe, jobject field)) {
  UnsafeWrapper("Unsafe_ObjectFieldOffset0");

  return find_field_offset(field, 0, THREAD);
} UNSAFE_END
710
// Offset cookie for a static field (throws if the field is non-static).
UNSAFE_ENTRY(jlong, Unsafe_StaticFieldOffset0(JNIEnv *env, jobject unsafe, jobject field)) {
  UnsafeWrapper("Unsafe_StaticFieldOffset0");

  return find_field_offset(field, 1, THREAD);
} UNSAFE_END
716
// Return the base object for a static field's offset cookie: in this VM
// that is the declaring class's java.lang.Class mirror.
UNSAFE_ENTRY(jobject, Unsafe_StaticFieldBase0(JNIEnv *env, jobject unsafe, jobject field)) {
  UnsafeWrapper("Unsafe_StaticFieldBase0");

  assert(field != NULL, "field must not be NULL");

  // Note: In this VM implementation, a field address is always a short
  // offset from the base of a a klass metaobject. Thus, the full dynamic
  // range of the return type is never used. However, some implementations
  // might put the static field inside an array shared by many classes,
  // or even at a fixed address, in which case the address could be quite
  // large. In that last case, this function would return NULL, since
  // the address would operate alone, without any base pointer.

  oop reflected = JNIHandles::resolve_non_null(field);
  oop mirror = java_lang_reflect_Field::clazz(reflected);
  int modifiers = java_lang_reflect_Field::modifiers(reflected);

  if ((modifiers & JVM_ACC_STATIC) == 0) {
    THROW_0(vmSymbols::java_lang_IllegalArgumentException());
  }

  return JNIHandles::make_local(env, mirror);
} UNSAFE_END
740
// Run static initialization for the class behind the mirror if it has not
// happened yet; any initializer exception propagates via CHECK.
UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized0(JNIEnv *env, jobject unsafe, jobject clazz)) {
  UnsafeWrapper("Unsafe_EnsureClassInitialized0");

  assert(clazz != NULL, "clazz must not be NULL");

  oop mirror = JNIHandles::resolve_non_null(clazz);

  Klass* klass = java_lang_Class::as_Klass(mirror);
  if (klass != NULL && klass->should_be_initialized()) {
    InstanceKlass* k = InstanceKlass::cast(klass);
    k->initialize(CHECK);
  }
}
UNSAFE_END
755
756 UNSAFE_ENTRY(jboolean, Unsafe_ShouldBeInitialized0(JNIEnv *env, jobject unsafe, jobject clazz)) {
757 UnsafeWrapper("Unsafe_ShouldBeInitialized0");
758
759 assert(clazz != NULL, "clazz must not be NULL");
760
761 oop mirror = JNIHandles::resolve_non_null(clazz);
762 Klass* klass = java_lang_Class::as_Klass(mirror);
763
764 if (klass != NULL && klass->should_be_initialized()) {
765 return true;
766 }
767
768 return false;
769 }
770 UNSAFE_END
771
// Shared helper for arrayBaseOffset0/arrayIndexScale0: out-params receive
// the byte offset of element 0 (base) and the element size in bytes
// (scale) for the given array class; throws InvalidClassException if
// clazz is not an array class.
static void getBaseAndScale(int& base, int& scale, jclass clazz, TRAPS) {
  assert(clazz != NULL, "clazz must not be NULL");

  oop mirror = JNIHandles::resolve_non_null(clazz);
  Klass* k = java_lang_Class::as_Klass(mirror);

  if (k == NULL || !k->is_array_klass()) {
    THROW(vmSymbols::java_lang_InvalidClassException());
  } else if (k->is_objArray_klass()) {
    // Reference arrays: element size depends on compressed-oops mode.
    base  = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    scale = heapOopSize;
  } else if (k->is_typeArray_klass()) {
    TypeArrayKlass* tak = TypeArrayKlass::cast(k);
    base  = tak->array_header_in_bytes();
    assert(base == arrayOopDesc::base_offset_in_bytes(tak->element_type()), "array_header_size semantics ok");
    scale = (1 << tak->log2_element_size());
  } else {
    ShouldNotReachHere();
  }
}
792
// Offset cookie of element 0 for the given array class.
UNSAFE_ENTRY(jint, Unsafe_ArrayBaseOffset0(JNIEnv *env, jobject unsafe, jclass clazz)) {
  UnsafeWrapper("Unsafe_ArrayBaseOffset0");

  int base = 0, scale = 0;
  getBaseAndScale(base, scale, clazz, CHECK_0);

  return field_offset_from_byte_offset(base);
} UNSAFE_END
801
802
// Per-index scale (in offset-cookie units) for the given array class.
UNSAFE_ENTRY(jint, Unsafe_ArrayIndexScale0(JNIEnv *env, jobject unsafe, jclass clazz)) {
  UnsafeWrapper("Unsafe_ArrayIndexScale0");

  int base = 0, scale = 0;
  getBaseAndScale(base, scale, clazz, CHECK_0);

  // This VM packs both fields and array elements down to the byte.
  // But watch out: If this changes, so that array references for
  // a given primitive type (say, T_BOOLEAN) use different memory units
  // than fields, this method MUST return zero for such arrays.
  // For example, the VM used to store sub-word sized fields in full
  // words in the object layout, so that accessors like getByte(Object,int)
  // did not really do what one might expect for arrays. Therefore,
  // this function used to report a zero scale factor, so that the user
  // would know not to attempt to access sub-word array elements.
  // // Code for unpacked fields:
  // if (scale < wordSize) return 0;

  // The following allows for a pretty general fieldOffset cookie scheme,
  // but requires it to be linear in byte offset.
  return field_offset_from_byte_offset(scale) - field_offset_from_byte_offset(0);
} UNSAFE_END
825
882 env->GetStringUTFRegion(name, 0, unicode_len, utfName);
883
884 for (uint i = 0; i < len; i++) {
885 if (utfName[i] == '.') utfName[i] = '/';
886 }
887 }
888
889 result = JVM_DefineClass(env, utfName, loader, body, length, pd);
890
891 if (utfName && utfName != buf) {
892 FREE_C_HEAP_ARRAY(char, utfName);
893 }
894
895 free_body:
896 FREE_C_HEAP_ARRAY(jbyte, body);
897 return result;
898 }
899
900
// defineClass0 entry point: transition to native state and delegate to the
// implementation helper (which calls back through JNI / JVM_DefineClass).
UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd)) {
  UnsafeWrapper("Unsafe_DefineClass");

  ThreadToNativeFromVM ttnfv(thread);

  return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
} UNSAFE_END
908
909
910 // define a class but do not make it known to the class loader or system dictionary
911 // - host_class: supplies context for linkage, access control, protection domain, and class loader
912 // - data: bytes of a class file, a raw memory address (length gives the number of bytes)
913 // - cp_patches: where non-null entries exist, they replace corresponding CP entries in data
914
915 // When you load an anonymous class U, it works as if you changed its name just before loading,
916 // to a name that you will never use again. Since the name is lost, no other class can directly
917 // link to any member of U. Just after U is loaded, the only way to use it is reflectively,
918 // through java.lang.Class methods like Class.newInstance.
919
920 // Access checks for linkage sites within U continue to follow the same rules as for named classes.
921 // The package of an anonymous class is given by the package qualifier on the name under which it was loaded.
922 // An anonymous class also has special privileges to access any member of its host class.
923 // This is the main reason why this loading operation is unsafe. The purpose of this is to
1017 }
1018
1019 ClassFileStream st(class_bytes, class_bytes_length, host_source, ClassFileStream::verify);
1020
1021 Symbol* no_class_name = NULL;
1022 Klass* anonk = SystemDictionary::parse_stream(no_class_name,
1023 host_loader,
1024 host_domain,
1025 &st,
1026 host_klass,
1027 cp_patches,
1028 CHECK_NULL);
1029 if (anonk == NULL) {
1030 return NULL;
1031 }
1032
1033 return instanceKlassHandle(THREAD, anonk);
1034 }
1035
// defineAnonymousClass0 entry point: parse the class bytes in the context
// of host_class (see the commentary above) and return the new mirror, or
// NULL on failure. A pending exception from the impl helper propagates.
UNSAFE_ENTRY(jclass, Unsafe_DefineAnonymousClass0(JNIEnv *env, jobject unsafe, jclass host_class, jbyteArray data, jobjectArray cp_patches_jh)) {
  UnsafeWrapper("Unsafe_DefineAnonymousClass0");

  ResourceMark rm(THREAD);

  instanceKlassHandle anon_klass;
  jobject res_jh = NULL;
  u1* temp_alloc = NULL;

  anon_klass = Unsafe_DefineAnonymousClass_impl(env, host_class, data, cp_patches_jh, &temp_alloc, THREAD);
  if (anon_klass() != NULL) {
    res_jh = JNIHandles::make_local(env, anon_klass->java_mirror());
  }

  // try/finally clause:
  // release the scratch buffer the impl helper allocated, even on failure.
  if (temp_alloc != NULL) {
    FREE_C_HEAP_ARRAY(u1, temp_alloc);
  }

  // The anonymous class loader data has been artificially been kept alive to
  // this point. The mirror and any instances of this class have to keep
  // it alive afterwards.
  if (anon_klass() != NULL) {
    anon_klass->class_loader_data()->set_keep_alive(false);
  }

  // let caller initialize it as needed...

  return (jclass) res_jh;
} UNSAFE_END
1066
// Throw the given Throwable without any compiler-enforced checking,
// via JNI Throw (needs the native thread state).
UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr)) {
  UnsafeWrapper("Unsafe_ThrowException");

  {
    ThreadToNativeFromVM ttnfv(thread);
    env->Throw(thr);
  }
} UNSAFE_END
1075
1076 // JSR166 ------------------------------------------------------------------
1077
// CAS an object reference at (obj, offset): expected 'e_h', new value
// 'x_h'. On success the card-table/barrier update is applied for the
// newly stored reference.
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
  UnsafeWrapper("Unsafe_CompareAndSwapObject");

  oop x = JNIHandles::resolve(x_h);
  oop e = JNIHandles::resolve(e_h);
  oop p = JNIHandles::resolve(obj);
  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
  if (res != e) {
    return false;
  }

  update_barrier_set((void*)addr, x);

  return true;
} UNSAFE_END
1094
// CAS a jint at (obj, offset): returns true iff the field held 'e' and
// was replaced by 'x'.
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
  UnsafeWrapper("Unsafe_CompareAndSwapInt");

  oop p = JNIHandles::resolve(obj);
  jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);

  return (jint)(Atomic::cmpxchg(x, addr, e)) == e;
} UNSAFE_END
1103
// CAS a jlong at (obj, offset). On platforms without an 8-byte cmpxchg
// the compare-and-store runs under the global UnsafeJlong_lock (and is
// therefore only atomic against other lock-using jlong operations — see
// the discussion above).
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
  UnsafeWrapper("Unsafe_CompareAndSwapLong");

  Handle p(THREAD, JNIHandles::resolve(obj));
  jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);

#ifdef SUPPORTS_NATIVE_CX8
  return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
#else
  if (VM_Version::supports_cx8()) {
    return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
  } else {
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);

    jlong val = Atomic::load(addr);
    if (val != e) {
      return false;
    }

    Atomic::store(x, addr);
    return true;
  }
#endif
} UNSAFE_END
1128
// LockSupport.park: block the current thread on its Parker, posting the
// JFR EventThreadPark event and the HOTSPOT_THREAD_PARK tracing probes
// around the wait. 'time' is a deadline when isAbsolute, else a relative
// timeout (0 = indefinite).
UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) {
  UnsafeWrapper("Unsafe_Park");

  EventThreadPark event;
  HOTSPOT_THREAD_PARK_BEGIN((uintptr_t) thread->parker(), (int) isAbsolute, time);

  // Reports the thread as parked (timed iff time != 0) for the duration.
  JavaThreadParkedState jtps(thread, time != 0);
  thread->parker()->park(isAbsolute != 0, time);

  HOTSPOT_THREAD_PARK_END((uintptr_t) thread->parker());

  if (event.should_commit()) {
    oop obj = thread->current_park_blocker();
    event.set_klass((obj != NULL) ? obj->klass() : NULL);
    event.set_timeout(time);
    event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
    event.commit();
  }
} UNSAFE_END
1148
// Unpark the given thread's Parker. The Parker address is cached in the
// java.lang.Thread object (park_event field) on first use; subsequent calls
// read it without taking Threads_lock.
UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread)) {
  UnsafeWrapper("Unsafe_Unpark");

  Parker* p = NULL;

  if (jthread != NULL) {
    oop java_thread = JNIHandles::resolve_non_null(jthread);
    if (java_thread != NULL) {
      jlong lp = java_lang_Thread::park_event(java_thread);
      if (lp != 0) {
        // This cast is OK even though the jlong might have been read
        // non-atomically on 32bit systems, since there, one word will
        // always be zero anyway and the value set is always the same
        p = (Parker*)addr_from_java(lp);
      } else {
        // Grab lock if apparently null or using older version of library
        MutexLocker mu(Threads_lock);
        // Re-resolve under the lock: the thread may have exited.
        java_thread = JNIHandles::resolve_non_null(jthread);

        if (java_thread != NULL) {
          JavaThread* thr = java_lang_Thread::thread(java_thread);
          if (thr != NULL) {
            p = thr->parker();
            if (p != NULL) { // Bind to Java thread for next time.
              java_lang_Thread::set_park_event(java_thread, addr_to_java(p));
            }
          }
        }
      }
    }
  }

  if (p != NULL) {
    HOTSPOT_THREAD_UNPARK((uintptr_t) p);
    p->unpark();
  }
} UNSAFE_END
1186
1187 UNSAFE_ENTRY(jint, Unsafe_GetLoadAverage0(JNIEnv *env, jobject unsafe, jdoubleArray loadavg, jint nelem)) {
1188 UnsafeWrapper("Unsafe_Loadavg");
1189
1190 const int max_nelem = 3;
1191 double la[max_nelem];
1192 jint ret;
1193
1194 typeArrayOop a = typeArrayOop(JNIHandles::resolve_non_null(loadavg));
1195 assert(a->is_typeArray(), "must be type array");
1196
1197 ret = os::loadavg(la, nelem);
1198 if (ret == -1) {
1199 return -1;
1200 }
1201
1202 // if successful, ret is the number of samples actually retrieved.
1203 assert(ret >= 0 && ret <= max_nelem, "Unexpected loadavg return value");
1204 switch(ret) {
1205 case 3: a->double_at_put(2, (jdouble)la[2]); // fall through
1206 case 2: a->double_at_put(1, (jdouble)la[1]); // fall through
1207 case 1: a->double_at_put(0, (jdouble)la[0]); break;
1208 }
1209
1317
1318 #undef ADR
1319 #undef LANG
1320 #undef OBJ
1321 #undef CLS
1322 #undef FLD
1323 #undef THR
1324 #undef DC_Args
1325 #undef DAC_Args
1326
1327 #undef DECLARE_GETPUTOOP
1328 #undef DECLARE_GETPUTNATIVE
1329
1330
1331 // This function is exported, used by NativeLookup.
1332 // The Unsafe_xxx functions above are called only from the interpreter.
1333 // The optimizer looks at names and signatures to recognize
1334 // individual functions.
1335
// Exported entry point (used by NativeLookup): registers the native method
// table for jdk.internal.misc.Unsafe via JNI RegisterNatives.
JVM_ENTRY(void, JVM_RegisterJDKInternalMiscUnsafeMethods(JNIEnv *env, jclass unsafeclass)) {
  UnsafeWrapper("JVM_RegisterJDKInternalMiscUnsafeMethods");

  {
    // RegisterNatives is a JNI call, so transition out of _thread_in_vm.
    ThreadToNativeFromVM ttnfv(thread);

    int ok = env->RegisterNatives(unsafeclass, jdk_internal_misc_Unsafe_methods, sizeof(jdk_internal_misc_Unsafe_methods)/sizeof(JNINativeMethod));
    guarantee(ok == 0, "register jdk.internal.misc.Unsafe natives");
  }
} JVM_END
|
47 #endif // INCLUDE_ALL_GCS
48
49 /**
50 * Implementation of the jdk.internal.misc.Unsafe class
51 */
52
53
54 #define MAX_OBJECT_SIZE \
55 ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
56 + ((julong)max_jint * sizeof(double)) )
57
58
59 #define UNSAFE_ENTRY(result_type, header) \
60 JVM_ENTRY(static result_type, header)
61
62 #define UNSAFE_LEAF(result_type, header) \
63 JVM_LEAF(static result_type, header)
64
65 #define UNSAFE_END JVM_END
66
67
// Convert a Java-side jlong "address" to a native pointer.
static inline void* addr_from_java(jlong addr) {
  // This assert fails in a variety of ways on 32-bit systems.
  // It is impossible to predict whether native code that converts
  // pointers to longs will sign-extend or zero-extend the addresses.
  //assert(addr == (uintptr_t)addr, "must not be odd high bits");
  return (void*)(uintptr_t)addr;
}
75
// Convert a native pointer to the jlong form handed back to Java code.
static inline jlong addr_to_java(void* p) {
  assert(p == (void*)(uintptr_t)p, "must not be odd high bits");
  return (uintptr_t)p;
}
80
81
82 // Note: The VM's obj_field and related accessors use byte-scaled
83 // ("unscaled") offsets, just as the unsafe methods do.
84
85 // However, the method Unsafe.fieldOffset explicitly declines to
86 // guarantee this. The field offset values manipulated by the Java user
141 *(type_name*)index_oop_from_field_offset_long(p, offset) = x
142
// Volatile field read: load-acquire the value at (obj, offset) into a new
// local 'v'. On CPUs that are not multiple-copy-atomic a leading full fence
// is required for IRIW correctness of volatile reads.
#define GET_FIELD_VOLATILE(obj, offset, type_name, v) \
  oop p = JNIHandles::resolve(obj); \
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) { \
    OrderAccess::fence(); \
  } \
  volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));
149
// Volatile field write: release-store to (obj, offset) followed by a full
// fence, giving Java-volatile (sequentially consistent) store semantics.
#define SET_FIELD_VOLATILE(obj, offset, type_name, x) \
  oop p = JNIHandles::resolve(obj); \
  OrderAccess::release_store_fence((volatile type_name*)index_oop_from_field_offset_long(p, offset), x);
153
154
155 // Get/SetObject must be special-cased, since it works with handles.
156
157 // These functions allow a null base pointer with an arbitrary address.
158 // But if the base pointer is non-null, the offset should make some sense.
159 // That is, it should be in the range [0, MAX_OBJECT_SIZE].
160 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
161 oop p = JNIHandles::resolve(obj);
162 oop v;
163
164 if (UseCompressedOops) {
165 narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset);
166 v = oopDesc::decode_heap_oop(n);
167 } else {
168 v = *(oop*)index_oop_from_field_offset_long(p, offset);
169 }
170
171 jobject ret = JNIHandles::make_local(env, v);
172
173 #if INCLUDE_ALL_GCS
174 // We could be accessing the referent field in a reference
175 // object. If G1 is enabled then we need to register non-null
176 // referent with the SATB barrier.
177 if (UseG1GC) {
178 bool needs_barrier = false;
179
180 if (ret != NULL) {
182 oop o = JNIHandles::resolve(obj);
183 Klass* k = o->klass();
184 if (InstanceKlass::cast(k)->reference_type() != REF_NONE) {
185 assert(InstanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
186 needs_barrier = true;
187 }
188 }
189 }
190
191 if (needs_barrier) {
192 oop referent = JNIHandles::resolve(ret);
193 G1SATBCardTableModRefBS::enqueue(referent);
194 }
195 }
196 #endif // INCLUDE_ALL_GCS
197
198 return ret;
199 } UNSAFE_END
200
// Store the oop x at (obj, offset). oop_store applies the GC write barrier.
UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);

  if (UseCompressedOops) {
    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
  } else {
    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
  }
} UNSAFE_END
211
// Volatile read of the oop at (obj, offset): volatile load + acquire.
UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
  oop p = JNIHandles::resolve(obj);
  void* addr = index_oop_from_field_offset_long(p, offset);

  volatile oop v;

  if (UseCompressedOops) {
    volatile narrowOop n = *(volatile narrowOop*) addr;
    // const_cast/(void) silence the unused-result of the volatile assignment.
    (void)const_cast<oop&>(v = oopDesc::decode_heap_oop(n));
  } else {
    (void)const_cast<oop&>(v = *(volatile oop*) addr);
  }

  // Acquire: order the load before subsequent memory operations.
  OrderAccess::acquire();
  return JNIHandles::make_local(env, v);
} UNSAFE_END
228
// Volatile store of oop x at (obj, offset): release + store (with GC write
// barrier via oop_store) + trailing full fence.
UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  void* addr = index_oop_from_field_offset_long(p, offset);
  OrderAccess::release();

  if (UseCompressedOops) {
    oop_store((narrowOop*)addr, x);
  } else {
    oop_store((oop*)addr, x);
  }

  OrderAccess::fence();
} UNSAFE_END
243
// Read a raw (never-compressed) oop stored at a native address.
UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe, jlong addr)) {
  oop v = *(oop*) (address) addr;

  return JNIHandles::make_local(env, v);
} UNSAFE_END
249
// Map a raw Klass* (as a jlong) back to its java.lang.Class mirror.
// NOTE(review): the caller must pass a valid Klass pointer; no validation
// is performed here.
UNSAFE_ENTRY(jclass, Unsafe_GetJavaMirror(JNIEnv *env, jobject unsafe, jlong metaspace_klass)) {
  Klass* klass = (Klass*) (address) metaspace_klass;

  return (jclass) JNIHandles::make_local(klass->java_mirror());
} UNSAFE_END
255
// Return the raw Klass* of obj as a jlong.
// NOTE(review): obj is dereferenced without a null check — assumes the
// caller guarantees non-null.
UNSAFE_ENTRY(jlong, Unsafe_GetKlassPointer(JNIEnv *env, jobject unsafe, jobject obj)) {
  oop o = JNIHandles::resolve(obj);
  jlong klass = (jlong) (address) o->klass();

  return klass;
} UNSAFE_END
262
263 #ifndef SUPPORTS_NATIVE_CX8
264
265 // VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
266 //
267 // On platforms which do not support atomic compare-and-swap of jlong (8 byte)
268 // values we have to use a lock-based scheme to enforce atomicity. This has to be
269 // applied to all Unsafe operations that set the value of a jlong field. Even so
270 // the compareAndSwapLong operation will not be atomic with respect to direct stores
271 // to the field from Java code. It is important therefore that any Java code that
272 // utilizes these Unsafe jlong operations does not perform direct stores. To permit
273 // direct loads of the field from Java code we must also use Atomic::store within the
274 // locked regions. And for good measure, in case there are direct stores, we also
275 // employ Atomic::load within those regions. Note that the field in question must be
276 // volatile and so must have atomic load/store accesses applied at the Java level.
277 //
278 // The locking scheme could utilize a range of strategies for controlling the locking
279 // granularity: from a lock per-field through to a single global lock. The latter is
280 // the simplest and is used for the current implementation. Note that the Java object
281 // that contains the field, can not, in general, be used for locking. To do so can lead
282 // to deadlocks as we may introduce locking into what appears to the Java code to be a
283 // lock-free path.
284 //
285 // As all the locked-regions are very short and themselves non-blocking we can treat
286 // them as leaf routines and elide safepoint checks (ie we don't perform any thread
287 // state transitions even when blocking for the lock). Note that if we do choose to
288 // add safepoint checks and thread state transitions, we must ensure that we calculate
289 // the address of the field _after_ we have acquired the lock, else the object may have
290 // been moved by the GC
291
// Volatile jlong read for platforms without native 8-byte CAS: uses the
// normal volatile path when cx8 is available, otherwise the global-lock
// emulation described in the block comment above.
UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
  if (VM_Version::supports_cx8()) {
    GET_FIELD_VOLATILE(obj, offset, jlong, v);
    return v;
  } else {
    // Address is computed before the no-safepoint lock, so the object
    // cannot move while addr is live (see comment above).
    Handle p (THREAD, JNIHandles::resolve(obj));
    jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
    jlong value = Atomic::load(addr);
    return value;
  }
} UNSAFE_END
304
// Volatile jlong write counterpart of Unsafe_GetLongVolatile: lock-based
// emulation when the platform lacks an atomic 8-byte store/CAS.
UNSAFE_ENTRY(void, Unsafe_SetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
  if (VM_Version::supports_cx8()) {
    SET_FIELD_VOLATILE(obj, offset, jlong, x);
  } else {
    Handle p (THREAD, JNIHandles::resolve(obj));
    jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
    Atomic::store(x, addr);
  }
} UNSAFE_END
315
316 #endif // not SUPPORTS_NATIVE_CX8
317
// Report platform byte order: true iff this build targets a big-endian CPU.
UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
#ifdef VM_LITTLE_ENDIAN
  return false;
#else
  return true;
#endif
} UNSAFE_END
325
// Report whether the platform tolerates unaligned memory accesses.
UNSAFE_LEAF(jint, Unsafe_unalignedAccess0(JNIEnv *env, jobject unsafe)) {
  return UseUnalignedAccesses;
} UNSAFE_END
329
// Generates a plain (non-volatile) Unsafe_Get<Type>/Unsafe_Set<Type> pair
// for one primitive java_type, built on GET_FIELD/SET_FIELD.
#define DEFINE_GETSETOOP(java_type, Type) \
 \
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
  GET_FIELD(obj, offset, java_type, v); \
  return v; \
} UNSAFE_END \
 \
UNSAFE_ENTRY(void, Unsafe_Set##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
  SET_FIELD(obj, offset, java_type, x); \
} UNSAFE_END \
 \
// END DEFINE_GETSETOOP.

// Instantiate accessors for every Java primitive type.
DEFINE_GETSETOOP(jboolean, Boolean)
DEFINE_GETSETOOP(jbyte, Byte)
DEFINE_GETSETOOP(jshort, Short);
DEFINE_GETSETOOP(jchar, Char);
DEFINE_GETSETOOP(jint, Int);
DEFINE_GETSETOOP(jlong, Long);
DEFINE_GETSETOOP(jfloat, Float);
DEFINE_GETSETOOP(jdouble, Double);

#undef DEFINE_GETSETOOP
353
// Generates a volatile Unsafe_Get<Type>Volatile/Unsafe_Set<Type>Volatile
// pair for one primitive java_type, built on the *_FIELD_VOLATILE macros.
#define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
 \
UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
  GET_FIELD_VOLATILE(obj, offset, java_type, v); \
  return v; \
} UNSAFE_END \
 \
UNSAFE_ENTRY(void, Unsafe_Set##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
  SET_FIELD_VOLATILE(obj, offset, java_type, x); \
} UNSAFE_END \
 \
// END DEFINE_GETSETOOP_VOLATILE.

DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean)
DEFINE_GETSETOOP_VOLATILE(jbyte, Byte)
DEFINE_GETSETOOP_VOLATILE(jshort, Short);
DEFINE_GETSETOOP_VOLATILE(jchar, Char);
DEFINE_GETSETOOP_VOLATILE(jint, Int);
DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
DEFINE_GETSETOOP_VOLATILE(jdouble, Double);

// jlong needs hand-written versions on platforms without native 8-byte
// atomics (see Unsafe_GetLongVolatile / Unsafe_SetLongVolatile above).
#ifdef SUPPORTS_NATIVE_CX8
DEFINE_GETSETOOP_VOLATILE(jlong, Long);
#endif

#undef DEFINE_GETSETOOP_VOLATILE
380
// The non-intrinsified versions of setOrdered just use setVolatile

UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) {
  SET_FIELD_VOLATILE(obj, offset, jint, x);
} UNSAFE_END
386
// Ordered oop store: same release/store/fence sequence as
// Unsafe_SetObjectVolatile (no weaker intrinsic is provided here).
UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  void* addr = index_oop_from_field_offset_long(p, offset);
  OrderAccess::release();

  if (UseCompressedOops) {
    oop_store((narrowOop*)addr, x);
  } else {
    oop_store((oop*)addr, x);
  }

  OrderAccess::fence();
} UNSAFE_END
401
// Ordered jlong store; falls back to the global-lock emulation when the
// platform has no atomic 8-byte store.
UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
#ifdef SUPPORTS_NATIVE_CX8
  SET_FIELD_VOLATILE(obj, offset, jlong, x);
#else

  // Keep old code for platforms which may not have atomic long (8 bytes) instructions
  if (VM_Version::supports_cx8()) {
    SET_FIELD_VOLATILE(obj, offset, jlong, x);
  } else {
    Handle p(THREAD, JNIHandles::resolve(obj));
    jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
    Atomic::store(x, addr);
  }
#endif
} UNSAFE_END
418
// LoadLoad|LoadStore barrier (acquire semantics).
UNSAFE_LEAF(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe)) {
  OrderAccess::acquire();
} UNSAFE_END
422
// StoreStore|LoadStore barrier (release semantics).
UNSAFE_LEAF(void, Unsafe_StoreFence(JNIEnv *env, jobject unsafe)) {
  OrderAccess::release();
} UNSAFE_END
426
// Full bidirectional memory barrier.
UNSAFE_LEAF(void, Unsafe_FullFence(JNIEnv *env, jobject unsafe)) {
  OrderAccess::fence();
} UNSAFE_END
430
431 ////// Data in the C heap.
432
433 // Note: These do not throw NullPointerException for bad pointers.
434 // They just crash. Only a oop base pointer can generate a NullPointerException.
435 //
// Generates Unsafe_GetNative<Type>/Unsafe_SetNative<Type> pairs for raw
// (C-heap) addresses. The doing_unsafe_access flag brackets the access so
// the signal handler can attribute a fault to the caller's bad address.
#define DEFINE_GETSETNATIVE(java_type, Type, native_type) \
 \
UNSAFE_ENTRY(java_type, Unsafe_GetNative##Type(JNIEnv *env, jobject unsafe, jlong addr)) { \
  void* p = addr_from_java(addr); \
  JavaThread* t = JavaThread::current(); \
  t->set_doing_unsafe_access(true); \
  java_type x = *(volatile native_type*)p; \
  t->set_doing_unsafe_access(false); \
  return x; \
} UNSAFE_END \
 \
UNSAFE_ENTRY(void, Unsafe_SetNative##Type(JNIEnv *env, jobject unsafe, jlong addr, java_type x)) { \
  JavaThread* t = JavaThread::current(); \
  t->set_doing_unsafe_access(true); \
  void* p = addr_from_java(addr); \
  *(volatile native_type*)p = x; \
  t->set_doing_unsafe_access(false); \
} UNSAFE_END \
 \
// END DEFINE_GETSETNATIVE.

DEFINE_GETSETNATIVE(jbyte, Byte, signed char)
DEFINE_GETSETNATIVE(jshort, Short, signed short);
DEFINE_GETSETNATIVE(jchar, Char, unsigned short);
DEFINE_GETSETNATIVE(jint, Int, jint);
// no long -- handled specially
DEFINE_GETSETNATIVE(jfloat, Float, float);
DEFINE_GETSETNATIVE(jdouble, Double, double);

#undef DEFINE_GETSETNATIVE
466
467 UNSAFE_ENTRY(jlong, Unsafe_GetNativeLong(JNIEnv *env, jobject unsafe, jlong addr)) {
468 JavaThread* t = JavaThread::current();
469 // We do it this way to avoid problems with access to heap using 64
470 // bit loads, as jlong in heap could be not 64-bit aligned, and on
471 // some CPUs (SPARC) it leads to SIGBUS.
472 t->set_doing_unsafe_access(true);
473 void* p = addr_from_java(addr);
474 jlong x;
475
476 if (is_ptr_aligned(p, sizeof(jlong)) == 0) {
477 // jlong is aligned, do a volatile access
478 x = *(volatile jlong*)p;
479 } else {
480 jlong_accessor acc;
481 acc.words[0] = ((volatile jint*)p)[0];
482 acc.words[1] = ((volatile jint*)p)[1];
483 x = acc.long_value;
484 }
485
486 t->set_doing_unsafe_access(false);
487
488 return x;
489 } UNSAFE_END
490
// Writes a jlong to native memory at 'addr'; unaligned addresses are
// written as two 32-bit halves (not atomic). See Unsafe_GetNativeLong.
UNSAFE_ENTRY(void, Unsafe_SetNativeLong(JNIEnv *env, jobject unsafe, jlong addr, jlong x)) {
  JavaThread* t = JavaThread::current();
  // see comment for Unsafe_GetNativeLong
  t->set_doing_unsafe_access(true);
  void* p = addr_from_java(addr);

  if (is_ptr_aligned(p, sizeof(jlong))) {
    // jlong is aligned, do a volatile access
    *(volatile jlong*)p = x;
  } else {
    jlong_accessor acc;
    acc.long_value = x;
    ((volatile jint*)p)[0] = acc.words[0];
    ((volatile jint*)p)[1] = acc.words[1];
  }

  t->set_doing_unsafe_access(false);
} UNSAFE_END
509
510
// Read a native pointer stored at 'addr' and return it as a jlong.
UNSAFE_LEAF(jlong, Unsafe_GetNativeAddress(JNIEnv *env, jobject unsafe, jlong addr)) {
  void* p = addr_from_java(addr);

  return addr_to_java(*(void**)p);
} UNSAFE_END
516
// Store native pointer 'x' at native location 'addr'.
UNSAFE_LEAF(void, Unsafe_SetNativeAddress(JNIEnv *env, jobject unsafe, jlong addr, jlong x)) {
  void* p = addr_from_java(addr);
  *(void**)p = addr_from_java(x);
} UNSAFE_END
521
522
523 ////// Allocation requests
524
// Allocate an instance of cls without running any constructor (JNI
// AllocObject semantics).
UNSAFE_ENTRY(jobject, Unsafe_AllocateInstance(JNIEnv *env, jobject unsafe, jclass cls)) {
  ThreadToNativeFromVM ttnfv(thread);
  return env->AllocObject(cls);
} UNSAFE_END
529
// malloc 'size' bytes (rounded up to HeapWordSize) from the C heap.
// Returns 0 on allocation failure (addr_to_java(NULL)); Java-side code is
// responsible for argument validation and OOME.
UNSAFE_ENTRY(jlong, Unsafe_AllocateMemory0(JNIEnv *env, jobject unsafe, jlong size)) {
  size_t sz = (size_t)size;

  sz = round_to(sz, HeapWordSize);
  void* x = os::malloc(sz, mtInternal);

  return addr_to_java(x);
} UNSAFE_END
538
// realloc the block at 'addr' to 'size' bytes (rounded to HeapWordSize).
UNSAFE_ENTRY(jlong, Unsafe_ReallocateMemory0(JNIEnv *env, jobject unsafe, jlong addr, jlong size)) {
  void* p = addr_from_java(addr);
  size_t sz = (size_t)size;
  sz = round_to(sz, HeapWordSize);

  void* x = os::realloc(p, sz, mtInternal);

  return addr_to_java(x);
} UNSAFE_END
548
// free a C-heap block previously returned by Allocate/ReallocateMemory0.
UNSAFE_ENTRY(void, Unsafe_FreeMemory0(JNIEnv *env, jobject unsafe, jlong addr)) {
  void* p = addr_from_java(addr);

  os::free(p);
} UNSAFE_END
554
// Fill 'size' bytes at (obj, offset) with 'value', using element-atomic
// stores. A null obj means an absolute native address.
UNSAFE_ENTRY(void, Unsafe_SetMemory0(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong size, jbyte value)) {
  size_t sz = (size_t)size;

  oop base = JNIHandles::resolve(obj);
  void* p = index_oop_from_field_offset_long(base, offset);

  Copy::fill_to_memory_atomic(p, sz, value);
} UNSAFE_END
563
// Copy 'size' bytes between (srcObj, srcOffset) and (dstObj, dstOffset);
// either base may be null (absolute address). Handles overlap and keeps
// element-level atomicity.
UNSAFE_ENTRY(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size)) {
  size_t sz = (size_t)size;

  oop srcp = JNIHandles::resolve(srcObj);
  oop dstp = JNIHandles::resolve(dstObj);

  void* src = index_oop_from_field_offset_long(srcp, srcOffset);
  void* dst = index_oop_from_field_offset_long(dstp, dstOffset);

  Copy::conjoint_memory_atomic(src, dst, sz);
} UNSAFE_END
575
576 // This function is a leaf since if the source and destination are both in native memory
577 // the copy may potentially be very large, and we don't want to disable GC if we can avoid it.
578 // If either source or destination (or both) are on the heap, the function will enter VM using
579 // JVM_ENTRY_FROM_LEAF
// Copy 'size' bytes while byte-swapping each 'elemSize'-byte element.
// Declared leaf so a huge native->native copy does not block GC; only
// enters the VM (JVM_ENTRY_FROM_LEAF) when a heap base is involved.
UNSAFE_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size, jlong elemSize)) {
  size_t sz = (size_t)size;
  size_t esz = (size_t)elemSize;

  if (srcObj == NULL && dstObj == NULL) {
    // Both src & dst are in native memory
    address src = (address)srcOffset;
    address dst = (address)dstOffset;

    Copy::conjoint_swap(src, dst, sz, esz);
  } else {
    // At least one of src/dst are on heap, transition to VM to access raw pointers

    JVM_ENTRY_FROM_LEAF(env, void, Unsafe_CopySwapMemory0) {
      oop srcp = JNIHandles::resolve(srcObj);
      oop dstp = JNIHandles::resolve(dstObj);

      address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
      address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);

      Copy::conjoint_swap(src, dst, sz, esz);
    } JVM_END
  }
} UNSAFE_END
604
605 ////// Random queries
606
// Native pointer width in bytes (4 or 8).
UNSAFE_LEAF(jint, Unsafe_AddressSize0(JNIEnv *env, jobject unsafe)) {
  return sizeof(void*);
} UNSAFE_END
610
// OS virtual-memory page size in bytes.
UNSAFE_LEAF(jint, Unsafe_PageSize()) {
  return os::vm_page_size();
} UNSAFE_END
614
// Compute the Unsafe field-offset cookie for a java.lang.reflect.Field.
// must_be_static: 1 = require static, 0 = require instance, negative =
// accept either. Throws IllegalArgumentException on a staticness mismatch.
static jint find_field_offset(jobject field, int must_be_static, TRAPS) {
  assert(field != NULL, "field must not be NULL");

  oop reflected = JNIHandles::resolve_non_null(field);
  oop mirror = java_lang_reflect_Field::clazz(reflected);
  Klass* k = java_lang_Class::as_Klass(mirror);
  int slot = java_lang_reflect_Field::slot(reflected);
  int modifiers = java_lang_reflect_Field::modifiers(reflected);

  if (must_be_static >= 0) {
    int really_is_static = ((modifiers & JVM_ACC_STATIC) != 0);
    if (must_be_static != really_is_static) {
      THROW_0(vmSymbols::java_lang_IllegalArgumentException());
    }
  }

  // Translate the field's byte offset into the external cookie form.
  int offset = InstanceKlass::cast(k)->field_offset(slot);
  return field_offset_from_byte_offset(offset);
}
634
// Offset cookie for an instance field (throws if the field is static).
UNSAFE_ENTRY(jlong, Unsafe_ObjectFieldOffset0(JNIEnv *env, jobject unsafe, jobject field)) {
  return find_field_offset(field, 0, THREAD);
} UNSAFE_END
638
// Offset cookie for a static field (throws if the field is non-static).
UNSAFE_ENTRY(jlong, Unsafe_StaticFieldOffset0(JNIEnv *env, jobject unsafe, jobject field)) {
  return find_field_offset(field, 1, THREAD);
} UNSAFE_END
642
// Return the base object against which a static-field offset cookie is
// applied: in this VM, the field's java.lang.Class mirror.
UNSAFE_ENTRY(jobject, Unsafe_StaticFieldBase0(JNIEnv *env, jobject unsafe, jobject field)) {
  assert(field != NULL, "field must not be NULL");

  // Note: In this VM implementation, a field address is always a short
  // offset from the base of a klass metaobject. Thus, the full dynamic
  // range of the return type is never used. However, some implementations
  // might put the static field inside an array shared by many classes,
  // or even at a fixed address, in which case the address could be quite
  // large. In that last case, this function would return NULL, since
  // the address would operate alone, without any base pointer.

  oop reflected = JNIHandles::resolve_non_null(field);
  oop mirror = java_lang_reflect_Field::clazz(reflected);
  int modifiers = java_lang_reflect_Field::modifiers(reflected);

  if ((modifiers & JVM_ACC_STATIC) == 0) {
    THROW_0(vmSymbols::java_lang_IllegalArgumentException());
  }

  return JNIHandles::make_local(env, mirror);
} UNSAFE_END
664
// Run static initialization for the given class if it has not happened yet
// (may execute <clinit> and therefore throw).
UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized0(JNIEnv *env, jobject unsafe, jobject clazz)) {
  assert(clazz != NULL, "clazz must not be NULL");

  oop mirror = JNIHandles::resolve_non_null(clazz);

  // Primitive-type mirrors yield a NULL Klass and are ignored.
  Klass* klass = java_lang_Class::as_Klass(mirror);
  if (klass != NULL && klass->should_be_initialized()) {
    InstanceKlass* k = InstanceKlass::cast(klass);
    k->initialize(CHECK);
  }
}
UNSAFE_END
677
// Query (without triggering it) whether the class still needs static
// initialization.
UNSAFE_ENTRY(jboolean, Unsafe_ShouldBeInitialized0(JNIEnv *env, jobject unsafe, jobject clazz)) {
  assert(clazz != NULL, "clazz must not be NULL");

  oop mirror = JNIHandles::resolve_non_null(clazz);
  Klass* klass = java_lang_Class::as_Klass(mirror);

  if (klass != NULL && klass->should_be_initialized()) {
    return true;
  }

  return false;
}
UNSAFE_END
691
// For an array class, report the header size in bytes (base) and the
// per-element size in bytes (scale). Throws InvalidClassException if clazz
// is not an array class.
static void getBaseAndScale(int& base, int& scale, jclass clazz, TRAPS) {
  assert(clazz != NULL, "clazz must not be NULL");

  oop mirror = JNIHandles::resolve_non_null(clazz);
  Klass* k = java_lang_Class::as_Klass(mirror);

  if (k == NULL || !k->is_array_klass()) {
    THROW(vmSymbols::java_lang_InvalidClassException());
  } else if (k->is_objArray_klass()) {
    base = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    scale = heapOopSize;  // narrow or full oop width
  } else if (k->is_typeArray_klass()) {
    TypeArrayKlass* tak = TypeArrayKlass::cast(k);
    base = tak->array_header_in_bytes();
    assert(base == arrayOopDesc::base_offset_in_bytes(tak->element_type()), "array_header_size semantics ok");
    scale = (1 << tak->log2_element_size());
  } else {
    ShouldNotReachHere();
  }
}
712
// Offset cookie of element 0 for arrays of the given class.
UNSAFE_ENTRY(jint, Unsafe_ArrayBaseOffset0(JNIEnv *env, jobject unsafe, jclass clazz)) {
  int base = 0, scale = 0;
  getBaseAndScale(base, scale, clazz, CHECK_0);

  return field_offset_from_byte_offset(base);
} UNSAFE_END
719
720
// Per-index offset-cookie increment for arrays of the given class.
UNSAFE_ENTRY(jint, Unsafe_ArrayIndexScale0(JNIEnv *env, jobject unsafe, jclass clazz)) {
  int base = 0, scale = 0;
  getBaseAndScale(base, scale, clazz, CHECK_0);

  // This VM packs both fields and array elements down to the byte.
  // But watch out: If this changes, so that array references for
  // a given primitive type (say, T_BOOLEAN) use different memory units
  // than fields, this method MUST return zero for such arrays.
  // For example, the VM used to store sub-word sized fields in full
  // words in the object layout, so that accessors like getByte(Object,int)
  // did not really do what one might expect for arrays. Therefore,
  // this function used to report a zero scale factor, so that the user
  // would know not to attempt to access sub-word array elements.
  // // Code for unpacked fields:
  // if (scale < wordSize) return 0;

  // The following allows for a pretty general fieldOffset cookie scheme,
  // but requires it to be linear in byte offset.
  return field_offset_from_byte_offset(scale) - field_offset_from_byte_offset(0);
} UNSAFE_END
741
798 env->GetStringUTFRegion(name, 0, unicode_len, utfName);
799
800 for (uint i = 0; i < len; i++) {
801 if (utfName[i] == '.') utfName[i] = '/';
802 }
803 }
804
805 result = JVM_DefineClass(env, utfName, loader, body, length, pd);
806
807 if (utfName && utfName != buf) {
808 FREE_C_HEAP_ARRAY(char, utfName);
809 }
810
811 free_body:
812 FREE_C_HEAP_ARRAY(jbyte, body);
813 return result;
814 }
815
816
// Define a class from bytecodes; delegates to Unsafe_DefineClass_impl after
// transitioning to native state (the impl makes JNI calls).
UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd)) {
  ThreadToNativeFromVM ttnfv(thread);

  return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
} UNSAFE_END
822
823
824 // define a class but do not make it known to the class loader or system dictionary
825 // - host_class: supplies context for linkage, access control, protection domain, and class loader
826 // - data: bytes of a class file, a raw memory address (length gives the number of bytes)
827 // - cp_patches: where non-null entries exist, they replace corresponding CP entries in data
828
829 // When you load an anonymous class U, it works as if you changed its name just before loading,
830 // to a name that you will never use again. Since the name is lost, no other class can directly
831 // link to any member of U. Just after U is loaded, the only way to use it is reflectively,
832 // through java.lang.Class methods like Class.newInstance.
833
834 // Access checks for linkage sites within U continue to follow the same rules as for named classes.
835 // The package of an anonymous class is given by the package qualifier on the name under which it was loaded.
836 // An anonymous class also has special privileges to access any member of its host class.
837 // This is the main reason why this loading operation is unsafe. The purpose of this is to
931 }
932
933 ClassFileStream st(class_bytes, class_bytes_length, host_source, ClassFileStream::verify);
934
935 Symbol* no_class_name = NULL;
936 Klass* anonk = SystemDictionary::parse_stream(no_class_name,
937 host_loader,
938 host_domain,
939 &st,
940 host_klass,
941 cp_patches,
942 CHECK_NULL);
943 if (anonk == NULL) {
944 return NULL;
945 }
946
947 return instanceKlassHandle(THREAD, anonk);
948 }
949
// Define a VM-anonymous class in the context of host_class (see the
// commentary above Unsafe_DefineAnonymousClass_impl). Returns the mirror,
// or NULL on failure; the caller initializes the class as needed.
UNSAFE_ENTRY(jclass, Unsafe_DefineAnonymousClass0(JNIEnv *env, jobject unsafe, jclass host_class, jbyteArray data, jobjectArray cp_patches_jh)) {
  ResourceMark rm(THREAD);

  instanceKlassHandle anon_klass;
  jobject res_jh = NULL;
  u1* temp_alloc = NULL;

  anon_klass = Unsafe_DefineAnonymousClass_impl(env, host_class, data, cp_patches_jh, &temp_alloc, THREAD);
  if (anon_klass() != NULL) {
    res_jh = JNIHandles::make_local(env, anon_klass->java_mirror());
  }

  // try/finally clause: free the scratch buffer even on failure.
  if (temp_alloc != NULL) {
    FREE_C_HEAP_ARRAY(u1, temp_alloc);
  }

  // The anonymous class loader data has been artificially kept alive to
  // this point. The mirror and any instances of this class have to keep
  // it alive afterwards.
  if (anon_klass() != NULL) {
    anon_klass->class_loader_data()->set_keep_alive(false);
  }

  // let caller initialize it as needed...

  return (jclass) res_jh;
} UNSAFE_END
978
// Throw an arbitrary Throwable in the caller, bypassing checked-exception
// rules (JNI Throw).
UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr)) {
  ThreadToNativeFromVM ttnfv(thread);
  env->Throw(thr);
} UNSAFE_END
983
984 // JSR166 ------------------------------------------------------------------
985
// Atomically replace the oop at (obj, offset) with x iff it currently
// holds e; applies the GC card/write barrier only on success.
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) {
  oop x = JNIHandles::resolve(x_h);
  oop e = JNIHandles::resolve(e_h);
  oop p = JNIHandles::resolve(obj);
  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
  if (res != e) {
    return false;
  }

  // Post-store barrier so the GC sees the new reference.
  update_barrier_set((void*)addr, x);

  return true;
} UNSAFE_END
1000
// Atomically replace the jint at (obj, offset) with x iff it holds e.
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
  oop p = JNIHandles::resolve(obj);
  jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);

  // cmpxchg returns the previously observed value; equal to e means success.
  return (jint)(Atomic::cmpxchg(x, addr, e)) == e;
} UNSAFE_END
1007
// Atomically replace the jlong at (obj, offset) with x iff it holds e;
// lock-based emulation on platforms without native 8-byte cmpxchg.
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
  // Handleize obj: the fallback path may block on UnsafeJlong_lock.
  Handle p(THREAD, JNIHandles::resolve(obj));
  jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);

#ifdef SUPPORTS_NATIVE_CX8
  return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
#else
  if (VM_Version::supports_cx8()) {
    return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
  } else {
    // No safepoint check: the object must not move while addr is live.
    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);

    jlong val = Atomic::load(addr);
    if (val != e) {
      return false;
    }

    Atomic::store(x, addr);
    return true;
  }
#endif
} UNSAFE_END
1030
// Park the current thread on its Parker (absolute ms or relative ns
// timeout), posting JFR/DTrace events. 'thread' comes from UNSAFE_ENTRY.
UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) {
  EventThreadPark event;
  HOTSPOT_THREAD_PARK_BEGIN((uintptr_t) thread->parker(), (int) isAbsolute, time);

  // Report the thread as parked (timed iff time != 0) for the duration.
  JavaThreadParkedState jtps(thread, time != 0);
  thread->parker()->park(isAbsolute != 0, time);

  HOTSPOT_THREAD_PARK_END((uintptr_t) thread->parker());

  if (event.should_commit()) {
    oop obj = thread->current_park_blocker();
    event.set_klass((obj != NULL) ? obj->klass() : NULL);
    event.set_timeout(time);
    event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
    event.commit();
  }
} UNSAFE_END
1048
// Unpark the given java.lang.Thread's Parker so a pending or subsequent
// park() returns promptly.  A null handle or an exited thread is a no-op.
UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread)) {
  Parker* p = NULL;

  if (jthread != NULL) {
    oop java_thread = JNIHandles::resolve_non_null(jthread);
    if (java_thread != NULL) {
      // Fast path: a previous unpark may have cached the Parker address
      // in the java.lang.Thread object's park_event field.
      jlong lp = java_lang_Thread::park_event(java_thread);
      if (lp != 0) {
        // This cast is OK even though the jlong might have been read
        // non-atomically on 32bit systems, since there, one word will
        // always be zero anyway and the value set is always the same
        p = (Parker*)addr_from_java(lp);
      } else {
        // Grab lock if apparently null or using older version of library
        MutexLocker mu(Threads_lock);
        // Re-resolve under Threads_lock so the JavaThread cannot be torn
        // down between the lookup and the use of its Parker.
        java_thread = JNIHandles::resolve_non_null(jthread);

        if (java_thread != NULL) {
          JavaThread* thr = java_lang_Thread::thread(java_thread);
          if (thr != NULL) {
            p = thr->parker();
            if (p != NULL) { // Bind to Java thread for next time.
              java_lang_Thread::set_park_event(java_thread, addr_to_java(p));
            }
          }
        }
      }
    }
  }

  // Fire the DTrace probe and wake the parked thread, if we found a Parker.
  if (p != NULL) {
    HOTSPOT_THREAD_UNPARK((uintptr_t) p);
    p->unpark();
  }
} UNSAFE_END
1084
1085 UNSAFE_ENTRY(jint, Unsafe_GetLoadAverage0(JNIEnv *env, jobject unsafe, jdoubleArray loadavg, jint nelem)) {
1086 const int max_nelem = 3;
1087 double la[max_nelem];
1088 jint ret;
1089
1090 typeArrayOop a = typeArrayOop(JNIHandles::resolve_non_null(loadavg));
1091 assert(a->is_typeArray(), "must be type array");
1092
1093 ret = os::loadavg(la, nelem);
1094 if (ret == -1) {
1095 return -1;
1096 }
1097
1098 // if successful, ret is the number of samples actually retrieved.
1099 assert(ret >= 0 && ret <= max_nelem, "Unexpected loadavg return value");
1100 switch(ret) {
1101 case 3: a->double_at_put(2, (jdouble)la[2]); // fall through
1102 case 2: a->double_at_put(1, (jdouble)la[1]); // fall through
1103 case 1: a->double_at_put(0, (jdouble)la[0]); break;
1104 }
1105
1213
1214 #undef ADR
1215 #undef LANG
1216 #undef OBJ
1217 #undef CLS
1218 #undef FLD
1219 #undef THR
1220 #undef DC_Args
1221 #undef DAC_Args
1222
1223 #undef DECLARE_GETPUTOOP
1224 #undef DECLARE_GETPUTNATIVE
1225
1226
1227 // This function is exported, used by NativeLookup.
1228 // The Unsafe_xxx functions above are called only from the interpreter.
1229 // The optimizer looks at names and signatures to recognize
1230 // individual functions.
1231
1232 JVM_ENTRY(void, JVM_RegisterJDKInternalMiscUnsafeMethods(JNIEnv *env, jclass unsafeclass)) {
1233 ThreadToNativeFromVM ttnfv(thread);
1234
1235 int ok = env->RegisterNatives(unsafeclass, jdk_internal_misc_Unsafe_methods, sizeof(jdk_internal_misc_Unsafe_methods)/sizeof(JNINativeMethod));
1236 guarantee(ok == 0, "register jdk.internal.misc.Unsafe natives");
1237 } JVM_END
|