
src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp

Old version of VM_GenCollectFullConcurrent::doit_epilogue():
 239 
 240 void VM_GenCollectFullConcurrent::doit_epilogue() {
 241   Thread* thr = Thread::current();
 242   assert(thr->is_Java_thread(), "just checking");
 243   JavaThread* jt = (JavaThread*)thr;
 244   // Release the Heap_lock first.
 245   Heap_lock->unlock();
 246   release_and_notify_pending_list_lock();
 247 
 248   // It is fine to test whether completed collections has
 249   // exceeded our request count without locking because
 250   // the completion count is monotonically increasing;
 251   // this will break for very long-running apps when the
 252   // count overflows and wraps around. XXX fix me !!!
 253   // e.g. at the rate of 1 full gc per ms, this could
 254   // overflow in about 1000 years.
 255   GenCollectedHeap* gch = GenCollectedHeap::heap();
 256   if (_gc_cause != GCCause::_gc_locker &&
 257       gch->total_full_collections_completed() <= _full_gc_count_before) {
 258     // maybe we should change the condition to test _gc_cause ==
 259     // GCCause::_java_lang_system_gc, instead of
 260     // _gc_cause != GCCause::_gc_locker
 261     assert(_gc_cause == GCCause::_java_lang_system_gc,
 262            "the only way to get here if this was a System.gc()-induced GC");
 263     assert(ExplicitGCInvokesConcurrent, "Error");
 264     // Now, wait for witnessing concurrent gc cycle to complete,
 265     // but do so in native mode, because we want to lock the
 266     // FullGCCount_lock, which may be needed by the VM thread
 267     // or by the CMS thread, so we do not want to be suspended
 268     // while holding that lock.
 269     ThreadToNativeFromVM native(jt);
 270     MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 271     // Either a concurrent or a stop-world full gc is sufficient
 272     // witness to our request.
 273     while (gch->total_full_collections_completed() <= _full_gc_count_before) {
 274       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
 275     }
 276   }
 277 }
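
The wait on FullGCCount_lock above (file lines 269-275) uses HotSpot's Monitor API: the epilogue drops to native mode, takes FullGCCount_lock with MutexLockerEx, and then blocks until total_full_collections_completed() moves past the snapshot taken before the request. The same idea in portable C++, sketched below with std::condition_variable and invented names purely for illustration (this is not HotSpot code):

  #include <condition_variable>
  #include <mutex>

  // Illustration only: wait until a monotonically increasing completion
  // counter moves past a snapshot taken before the request was issued.
  // Any completed full collection advances the counter, so any one of
  // them satisfies the waiter.
  class FullGcCompletionTracker {
   public:
    // Collector side: record one finished collection and wake waiters.
    void note_completed() {
      std::lock_guard<std::mutex> lg(_mu);
      ++_completed;
      _cv.notify_all();
    }

    // Requester side: sample the counter before issuing the request...
    unsigned long snapshot() {
      std::lock_guard<std::mutex> lg(_mu);
      return _completed;
    }

    // ...then block until at least one collection has completed since.
    void wait_past(unsigned long count_before) {
      std::unique_lock<std::mutex> ul(_mu);
      _cv.wait(ul, [&] { return _completed > count_before; });
    }

   private:
    std::mutex _mu;
    std::condition_variable _cv;
    unsigned long _completed = 0;
  };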

New version, with the assert relaxed from a hard-wired check for GCCause::_java_lang_system_gc to GCCause::is_user_requested_gc(), so that a full GC triggered through the GC.run Diagnostic Command (_dcmd_gc_run) is also accepted:
 239 
 240 void VM_GenCollectFullConcurrent::doit_epilogue() {
 241   Thread* thr = Thread::current();
 242   assert(thr->is_Java_thread(), "just checking");
 243   JavaThread* jt = (JavaThread*)thr;
 244   // Release the Heap_lock first.
 245   Heap_lock->unlock();
 246   release_and_notify_pending_list_lock();
 247 
 248   // It is fine to test whether completed collections has
 249   // exceeded our request count without locking because
 250   // the completion count is monotonically increasing;
 251   // this will break for very long-running apps when the
 252   // count overflows and wraps around. XXX fix me !!!
 253   // e.g. at the rate of 1 full gc per ms, this could
 254   // overflow in about 1000 years.
 255   GenCollectedHeap* gch = GenCollectedHeap::heap();
 256   if (_gc_cause != GCCause::_gc_locker &&
 257       gch->total_full_collections_completed() <= _full_gc_count_before) {
 258     // maybe we should change the condition to test _gc_cause ==
 259     // GCCause::_java_lang_system_gc or _dcmd_gc_run, instead of
 260     // _gc_cause != GCCause::_gc_locker
 261     assert(GCCause::is_user_requested_gc(_gc_cause),
 262            "the only way to get here if this was a System.gc()-induced GC");
 263     assert(ExplicitGCInvokesConcurrent, "Error");
 264     // Now, wait for witnessing concurrent gc cycle to complete,
 265     // but do so in native mode, because we want to lock the
 266     // FullGCCount_lock, which may be needed by the VM thread
 267     // or by the CMS thread, so we do not want to be suspended
 268     // while holding that lock.
 269     ThreadToNativeFromVM native(jt);
 270     MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 271     // Either a concurrent or a stop-world full gc is sufficient
 272     // witness to our request.
 273     while (gch->total_full_collections_completed() <= _full_gc_count_before) {
 274       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
 275     }
 276   }
 277 }
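
The relaxed assert relies on GCCause::is_user_requested_gc() covering both causes named in the comment on file line 259. A minimal sketch of a predicate consistent with that comment (an assumption for illustration, not copied from gcCause.hpp, with the enum trimmed down):

  enum Cause {
    _java_lang_system_gc,   // System.gc()
    _dcmd_gc_run,           // jcmd <pid> GC.run
    _gc_locker              // other causes elided
  };

  static bool is_user_requested_gc(Cause cause) {
    return cause == _java_lang_system_gc || cause == _dcmd_gc_run;
  }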