< prev index next >

src/share/vm/prims/jvm.cpp

Print this page
rev 12854 : [mq]: gcinterface.patch


 426   HandleMark hm(THREAD);
 427   const char* temp_dir = os::get_temp_directory();
 428   Handle h = java_lang_String::create_from_platform_dependent_str(temp_dir, CHECK_NULL);
 429   return (jstring) JNIHandles::make_local(env, h());
 430 JVM_END
 431 
 432 
 433 // java.lang.Runtime /////////////////////////////////////////////////////////////////////////
 434 
 435 extern volatile jint vm_created;
 436 
       // Terminate the VM with the given status code: run before-exit
       // processing (shutdown hooks bookkeeping), then exit the process.
       // vm_exit() does not return.
 437   JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code))
 438   before_exit(thread);
 439   vm_exit(code);
 440 JVM_END
 441 
 442 
       // Explicit GC request (e.g. System.gc()). Honored only when
       // -XX:+DisableExplicitGC is NOT set; the collection is tagged with
       // the _java_lang_system_gc cause so the GC can distinguish it.
 443   JVM_ENTRY_NO_ENV(void, JVM_GC(void))
 444   JVMWrapper("JVM_GC");
 445   if (!DisableExplicitGC) {
 446     Universe::heap()->collect(GCCause::_java_lang_system_gc);
 447   }
 448 JVM_END
 449 
 450 
       // Milliseconds since the last GC, as reported by the heap.
       // JVM_LEAF: queries the heap without a thread-state transition.
 451   JVM_LEAF(jlong, JVM_MaxObjectInspectionAge(void))
 452   JVMWrapper("JVM_MaxObjectInspectionAge");
 453   return Universe::heap()->millis_since_last_gc();
 454 JVM_END
 455 
 456 
       // Safely narrow a size_t heap quantity to the jlong returned to Java:
       // 32-bit: plain cast is lossless; 64-bit: clamp to max_jlong so the
       // unsigned-to-signed conversion cannot produce a negative value.
 457   static inline jlong convert_size_t_to_jlong(size_t val) {
 458   // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
 459   NOT_LP64 (return (jlong)val;)
 460   LP64_ONLY(return (jlong)MIN2(val, (size_t)max_jlong);)
 461 }
 462 
       // Runtime.totalMemory(): current heap capacity, clamped to jlong.
 463   JVM_ENTRY_NO_ENV(jlong, JVM_TotalMemory(void))
 464   JVMWrapper("JVM_TotalMemory");
 465   size_t n = Universe::heap()->capacity();
 466   return convert_size_t_to_jlong(n);
 467 JVM_END
 468 
 469 
       // Runtime.freeMemory(): capacity minus used. Both values are read
       // under Heap_lock so the subtraction sees a consistent snapshot
       // (a concurrent resize between the two reads could underflow).
 470   JVM_ENTRY_NO_ENV(jlong, JVM_FreeMemory(void))
 471   JVMWrapper("JVM_FreeMemory");
 472   CollectedHeap* ch = Universe::heap();
 473   size_t n;
 474   {
 475      MutexLocker x(Heap_lock);
 476      n = ch->capacity() - ch->used();
 477   }
 478   return convert_size_t_to_jlong(n);
 479 JVM_END
 480 
 481 
       // Runtime.maxMemory(): maximum heap capacity, clamped to jlong.
 482   JVM_ENTRY_NO_ENV(jlong, JVM_MaxMemory(void))
 483   JVMWrapper("JVM_MaxMemory");
 484   size_t n = Universe::heap()->max_capacity();
 485   return convert_size_t_to_jlong(n);
 486 JVM_END
 487 
 488 
       // Runtime.availableProcessors(): delegates to the os layer.
 489   JVM_ENTRY_NO_ENV(jint, JVM_ActiveProcessorCount(void))
 490   JVMWrapper("JVM_ActiveProcessorCount");
 491   return os::active_processor_count();
 492 JVM_END
 493 
 494 
 495 
 496 // java.lang.Throwable //////////////////////////////////////////////////////
 497 
 498 
       // Throwable.fillInStackTrace(): resolve the (non-null) receiver into
       // a Handle and record the current thread's stack into it.
 499   JVM_ENTRY(void, JVM_FillInStackTrace(JNIEnv *env, jobject receiver))
 500   JVMWrapper("JVM_FillInStackTrace");
 501   Handle exception(thread, JNIHandles::resolve_non_null(receiver));
 502   java_lang_Throwable::fill_in_stack_trace(exception);
 503 JVM_END
 504 


 651   } else {
 652     new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
 653   }
 654 
 655   // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
 656   // is modifying a reference field in the clonee, a non-oop-atomic copy might
 657   // be suspended in the middle of copying the pointer and end up with parts
 658   // of two different pointers in the field.  Subsequent dereferences will crash.
 659   // 4846409: an oop-copy of objects with long or double fields or arrays of same
 660   // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
 661   // of oops.  We know objects are aligned on a minimum of an jlong boundary.
 662   // The same is true of StubRoutines::object_copy and the various oop_copy
 663   // variants, and of the code generated by the inline_native_clone intrinsic.
 664   assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
 665   Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
 666                                (size_t)align_object_size(size) / HeapWordsPerLong);
 667   // Clear the header
 668   new_obj_oop->init_mark();
 669 
 670   // Store check (mark entire object and let gc sort it out)
 671   BarrierSet* bs = Universe::heap()->barrier_set();
 672   assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
 673   bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
 674 
 675   Handle new_obj(THREAD, new_obj_oop);
 676   // Special handling for MemberNames.  Since they contain Method* metadata, they
 677   // must be registered so that RedefineClasses can fix metadata contained in them.
 678   if (java_lang_invoke_MemberName::is_instance(new_obj()) &&
 679       java_lang_invoke_MemberName::is_method(new_obj())) {
 680     Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(new_obj());
 681     // MemberName may be unresolved, so doesn't need registration until resolved.
 682     if (method != NULL) {
 683       methodHandle m(THREAD, method);
 684       // This can safepoint and redefine method, so need both new_obj and method
 685       // in a handle, for two different reasons.  new_obj can move, method can be
 686       // deleted if nothing is using it on the stack.
 687       m->method_holder()->add_member_name(new_obj, false);
 688     }
 689   }
 690 
 691   // Caution: this involves a java upcall, so the clone should be




 426   HandleMark hm(THREAD);
 427   const char* temp_dir = os::get_temp_directory();
 428   Handle h = java_lang_String::create_from_platform_dependent_str(temp_dir, CHECK_NULL);
 429   return (jstring) JNIHandles::make_local(env, h());
 430 JVM_END
 431 
 432 
 433 // java.lang.Runtime /////////////////////////////////////////////////////////////////////////
 434 
 435 extern volatile jint vm_created;
 436 
       // Terminate the VM with the given status code: run before-exit
       // processing (shutdown hooks bookkeeping), then exit the process.
       // vm_exit() does not return.
 437   JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code))
 438   before_exit(thread);
 439   vm_exit(code);
 440 JVM_END
 441 
 442 
       // Explicit GC request (e.g. System.gc()). Honored only when
       // -XX:+DisableExplicitGC is NOT set. The heap is reached through the
       // GC interface singleton (GC::gc()->heap(), per the gcinterface patch).
 443   JVM_ENTRY_NO_ENV(void, JVM_GC(void))
 444   JVMWrapper("JVM_GC");
 445   if (!DisableExplicitGC) {
 446     GC::gc()->heap()->collect(GCCause::_java_lang_system_gc);
 447   }
 448 JVM_END
 449 
 450 
       // Milliseconds since the last GC, as reported by the heap
       // (obtained via the GC interface singleton).
 451   JVM_LEAF(jlong, JVM_MaxObjectInspectionAge(void))
 452   JVMWrapper("JVM_MaxObjectInspectionAge");
 453   return GC::gc()->heap()->millis_since_last_gc();
 454 JVM_END
 455 
 456 
       // Safely narrow a size_t heap quantity to the jlong returned to Java:
       // 32-bit: plain cast is lossless; 64-bit: clamp to max_jlong so the
       // unsigned-to-signed conversion cannot produce a negative value.
 457   static inline jlong convert_size_t_to_jlong(size_t val) {
 458   // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
 459   NOT_LP64 (return (jlong)val;)
 460   LP64_ONLY(return (jlong)MIN2(val, (size_t)max_jlong);)
 461 }
 462 
       // Runtime.totalMemory(): current heap capacity, clamped to jlong.
 463   JVM_ENTRY_NO_ENV(jlong, JVM_TotalMemory(void))
 464   JVMWrapper("JVM_TotalMemory");
 465   size_t n = GC::gc()->heap()->capacity();
 466   return convert_size_t_to_jlong(n);
 467 JVM_END
 468 
 469 
       // Runtime.freeMemory(): capacity minus used. Both values are read
       // under Heap_lock so the subtraction sees a consistent snapshot
       // (a concurrent resize between the two reads could underflow).
 470   JVM_ENTRY_NO_ENV(jlong, JVM_FreeMemory(void))
 471   JVMWrapper("JVM_FreeMemory");
 472   CollectedHeap* ch = GC::gc()->heap();
 473   size_t n;
 474   {
 475      MutexLocker x(Heap_lock);
 476      n = ch->capacity() - ch->used();
 477   }
 478   return convert_size_t_to_jlong(n);
 479 JVM_END
 480 
 481 
       // Runtime.maxMemory(): maximum heap capacity, clamped to jlong.
 482   JVM_ENTRY_NO_ENV(jlong, JVM_MaxMemory(void))
 483   JVMWrapper("JVM_MaxMemory");
 484   size_t n = GC::gc()->heap()->max_capacity();
 485   return convert_size_t_to_jlong(n);
 486 JVM_END
 487 
 488 
       // Runtime.availableProcessors(): delegates to the os layer.
 489   JVM_ENTRY_NO_ENV(jint, JVM_ActiveProcessorCount(void))
 490   JVMWrapper("JVM_ActiveProcessorCount");
 491   return os::active_processor_count();
 492 JVM_END
 493 
 494 
 495 
 496 // java.lang.Throwable //////////////////////////////////////////////////////
 497 
 498 
       // Throwable.fillInStackTrace(): resolve the (non-null) receiver into
       // a Handle and record the current thread's stack into it.
 499   JVM_ENTRY(void, JVM_FillInStackTrace(JNIEnv *env, jobject receiver))
 500   JVMWrapper("JVM_FillInStackTrace");
 501   Handle exception(thread, JNIHandles::resolve_non_null(receiver));
 502   java_lang_Throwable::fill_in_stack_trace(exception);
 503 JVM_END
 504 


 651   } else {
 652     new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
 653   }
 654 
 655   // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
 656   // is modifying a reference field in the clonee, a non-oop-atomic copy might
 657   // be suspended in the middle of copying the pointer and end up with parts
 658   // of two different pointers in the field.  Subsequent dereferences will crash.
 659   // 4846409: an oop-copy of objects with long or double fields or arrays of same
 660   // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
 661   // of oops.  We know objects are aligned on a minimum of an jlong boundary.
 662   // The same is true of StubRoutines::object_copy and the various oop_copy
 663   // variants, and of the code generated by the inline_native_clone intrinsic.
 664   assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
 665   Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
 666                                (size_t)align_object_size(size) / HeapWordsPerLong);
 667   // Clear the header
 668   new_obj_oop->init_mark();
 669 
 670   // Store check (mark entire object and let gc sort it out)
 671   BarrierSet* bs = GC::gc()->heap()->barrier_set();
 672   assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
 673   bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
 674 
 675   Handle new_obj(THREAD, new_obj_oop);
 676   // Special handling for MemberNames.  Since they contain Method* metadata, they
 677   // must be registered so that RedefineClasses can fix metadata contained in them.
 678   if (java_lang_invoke_MemberName::is_instance(new_obj()) &&
 679       java_lang_invoke_MemberName::is_method(new_obj())) {
 680     Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(new_obj());
 681     // MemberName may be unresolved, so doesn't need registration until resolved.
 682     if (method != NULL) {
 683       methodHandle m(THREAD, method);
 684       // This can safepoint and redefine method, so need both new_obj and method
 685       // in a handle, for two different reasons.  new_obj can move, method can be
 686       // deleted if nothing is using it on the stack.
 687       m->method_holder()->add_member_name(new_obj, false);
 688     }
 689   }
 690 
 691   // Caution: this involves a java upcall, so the clone should be


< prev index next >