426 HandleMark hm(THREAD);
427 const char* temp_dir = os::get_temp_directory();
428 Handle h = java_lang_String::create_from_platform_dependent_str(temp_dir, CHECK_NULL);
429 return (jstring) JNIHandles::make_local(env, h());
430 JVM_END
431
432
433 // java.lang.Runtime /////////////////////////////////////////////////////////////////////////
434
435 extern volatile jint vm_created;
436
437 JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code))
438 before_exit(thread);
439 vm_exit(code);
440 JVM_END
441
442
443 JVM_ENTRY_NO_ENV(void, JVM_GC(void))
444 JVMWrapper("JVM_GC");
445 if (!DisableExplicitGC) {
446 Universe::heap()->collect(GCCause::_java_lang_system_gc);
447 }
448 JVM_END
449
450
// Reports milliseconds since the last collection, as tracked by the heap.
// JVM_LEAF: no thread-state transition is performed for this call.
// NOTE(review): presumably consumed by soft-reference clock logic — verify against callers.
JVM_LEAF(jlong, JVM_MaxObjectInspectionAge(void))
  JVMWrapper("JVM_MaxObjectInspectionAge");
  return Universe::heap()->millis_since_last_gc();
JVM_END
455
456
457 static inline jlong convert_size_t_to_jlong(size_t val) {
458 // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
459 NOT_LP64 (return (jlong)val;)
460 LP64_ONLY(return (jlong)MIN2(val, (size_t)max_jlong);)
461 }
462
463 JVM_ENTRY_NO_ENV(jlong, JVM_TotalMemory(void))
464 JVMWrapper("JVM_TotalMemory");
465 size_t n = Universe::heap()->capacity();
466 return convert_size_t_to_jlong(n);
467 JVM_END
468
469
470 JVM_ENTRY_NO_ENV(jlong, JVM_FreeMemory(void))
471 JVMWrapper("JVM_FreeMemory");
472 CollectedHeap* ch = Universe::heap();
473 size_t n;
474 {
475 MutexLocker x(Heap_lock);
476 n = ch->capacity() - ch->used();
477 }
478 return convert_size_t_to_jlong(n);
479 JVM_END
480
481
482 JVM_ENTRY_NO_ENV(jlong, JVM_MaxMemory(void))
483 JVMWrapper("JVM_MaxMemory");
484 size_t n = Universe::heap()->max_capacity();
485 return convert_size_t_to_jlong(n);
486 JVM_END
487
488
// Runtime.availableProcessors() entry point: delegates to the OS layer's
// notion of currently-active processors.
JVM_ENTRY_NO_ENV(jint, JVM_ActiveProcessorCount(void))
  JVMWrapper("JVM_ActiveProcessorCount");
  return os::active_processor_count();
JVM_END
493
494
495
496 // java.lang.Throwable //////////////////////////////////////////////////////
497
498
// Throwable.fillInStackTrace() entry point: capture the current stack into
// the receiver Throwable.
JVM_ENTRY(void, JVM_FillInStackTrace(JNIEnv *env, jobject receiver))
  JVMWrapper("JVM_FillInStackTrace");
  // Handleize the receiver before calling out; fill_in_stack_trace may
  // safepoint, and a raw oop would not survive a GC.
  Handle exception(thread, JNIHandles::resolve_non_null(receiver));
  java_lang_Throwable::fill_in_stack_trace(exception);
JVM_END
504
647 } else {
648 new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
649 }
650
651 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
652 // is modifying a reference field in the clonee, a non-oop-atomic copy might
653 // be suspended in the middle of copying the pointer and end up with parts
654 // of two different pointers in the field. Subsequent dereferences will crash.
655 // 4846409: an oop-copy of objects with long or double fields or arrays of same
656 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
657 // of oops. We know objects are aligned on a minimum of an jlong boundary.
658 // The same is true of StubRoutines::object_copy and the various oop_copy
659 // variants, and of the code generated by the inline_native_clone intrinsic.
660 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
661 Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
662 (size_t)align_object_size(size) / HeapWordsPerLong);
663 // Clear the header
664 new_obj_oop->init_mark();
665
666 // Store check (mark entire object and let gc sort it out)
667 BarrierSet* bs = Universe::heap()->barrier_set();
668 assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
669 bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
670
671 Handle new_obj(THREAD, new_obj_oop);
672 // Special handling for MemberNames. Since they contain Method* metadata, they
673 // must be registered so that RedefineClasses can fix metadata contained in them.
674 if (java_lang_invoke_MemberName::is_instance(new_obj()) &&
675 java_lang_invoke_MemberName::is_method(new_obj())) {
676 Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(new_obj());
677 // MemberName may be unresolved, so doesn't need registration until resolved.
678 if (method != NULL) {
679 methodHandle m(THREAD, method);
680 // This can safepoint and redefine method, so need both new_obj and method
681 // in a handle, for two different reasons. new_obj can move, method can be
682 // deleted if nothing is using it on the stack.
683 m->method_holder()->add_member_name(new_obj());
684 }
685 }
686
687 // Caution: this involves a java upcall, so the clone should be
|
426 HandleMark hm(THREAD);
427 const char* temp_dir = os::get_temp_directory();
428 Handle h = java_lang_String::create_from_platform_dependent_str(temp_dir, CHECK_NULL);
429 return (jstring) JNIHandles::make_local(env, h());
430 JVM_END
431
432
433 // java.lang.Runtime /////////////////////////////////////////////////////////////////////////
434
435 extern volatile jint vm_created;
436
437 JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code))
438 before_exit(thread);
439 vm_exit(code);
440 JVM_END
441
442
443 JVM_ENTRY_NO_ENV(void, JVM_GC(void))
444 JVMWrapper("JVM_GC");
445 if (!DisableExplicitGC) {
446 GC::gc()->heap()->collect(GCCause::_java_lang_system_gc);
447 }
448 JVM_END
449
450
// Reports milliseconds since the last collection, as tracked by the heap.
// JVM_LEAF: no thread-state transition is performed for this call.
// NOTE(review): presumably consumed by soft-reference clock logic — verify against callers.
JVM_LEAF(jlong, JVM_MaxObjectInspectionAge(void))
  JVMWrapper("JVM_MaxObjectInspectionAge");
  return GC::gc()->heap()->millis_since_last_gc();
JVM_END
455
456
457 static inline jlong convert_size_t_to_jlong(size_t val) {
458 // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
459 NOT_LP64 (return (jlong)val;)
460 LP64_ONLY(return (jlong)MIN2(val, (size_t)max_jlong);)
461 }
462
463 JVM_ENTRY_NO_ENV(jlong, JVM_TotalMemory(void))
464 JVMWrapper("JVM_TotalMemory");
465 size_t n = GC::gc()->heap()->capacity();
466 return convert_size_t_to_jlong(n);
467 JVM_END
468
469
470 JVM_ENTRY_NO_ENV(jlong, JVM_FreeMemory(void))
471 JVMWrapper("JVM_FreeMemory");
472 CollectedHeap* ch = GC::gc()->heap();
473 size_t n;
474 {
475 MutexLocker x(Heap_lock);
476 n = ch->capacity() - ch->used();
477 }
478 return convert_size_t_to_jlong(n);
479 JVM_END
480
481
482 JVM_ENTRY_NO_ENV(jlong, JVM_MaxMemory(void))
483 JVMWrapper("JVM_MaxMemory");
484 size_t n = GC::gc()->heap()->max_capacity();
485 return convert_size_t_to_jlong(n);
486 JVM_END
487
488
// Runtime.availableProcessors() entry point: delegates to the OS layer's
// notion of currently-active processors.
JVM_ENTRY_NO_ENV(jint, JVM_ActiveProcessorCount(void))
  JVMWrapper("JVM_ActiveProcessorCount");
  return os::active_processor_count();
JVM_END
493
494
495
496 // java.lang.Throwable //////////////////////////////////////////////////////
497
498
// Throwable.fillInStackTrace() entry point: capture the current stack into
// the receiver Throwable.
JVM_ENTRY(void, JVM_FillInStackTrace(JNIEnv *env, jobject receiver))
  JVMWrapper("JVM_FillInStackTrace");
  // Handleize the receiver before calling out; fill_in_stack_trace may
  // safepoint, and a raw oop would not survive a GC.
  Handle exception(thread, JNIHandles::resolve_non_null(receiver));
  java_lang_Throwable::fill_in_stack_trace(exception);
JVM_END
504
647 } else {
648 new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
649 }
650
651 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
652 // is modifying a reference field in the clonee, a non-oop-atomic copy might
653 // be suspended in the middle of copying the pointer and end up with parts
654 // of two different pointers in the field. Subsequent dereferences will crash.
655 // 4846409: an oop-copy of objects with long or double fields or arrays of same
656 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
657 // of oops. We know objects are aligned on a minimum of an jlong boundary.
658 // The same is true of StubRoutines::object_copy and the various oop_copy
659 // variants, and of the code generated by the inline_native_clone intrinsic.
660 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
661 Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
662 (size_t)align_object_size(size) / HeapWordsPerLong);
663 // Clear the header
664 new_obj_oop->init_mark();
665
666 // Store check (mark entire object and let gc sort it out)
667 BarrierSet* bs = GC::gc()->heap()->barrier_set();
668 assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
669 bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
670
671 Handle new_obj(THREAD, new_obj_oop);
672 // Special handling for MemberNames. Since they contain Method* metadata, they
673 // must be registered so that RedefineClasses can fix metadata contained in them.
674 if (java_lang_invoke_MemberName::is_instance(new_obj()) &&
675 java_lang_invoke_MemberName::is_method(new_obj())) {
676 Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(new_obj());
677 // MemberName may be unresolved, so doesn't need registration until resolved.
678 if (method != NULL) {
679 methodHandle m(THREAD, method);
680 // This can safepoint and redefine method, so need both new_obj and method
681 // in a handle, for two different reasons. new_obj can move, method can be
682 // deleted if nothing is using it on the stack.
683 m->method_holder()->add_member_name(new_obj());
684 }
685 }
686
687 // Caution: this involves a java upcall, so the clone should be
|