
src/share/vm/gc/cms/vmCMSOperations.cpp

@@ -21,10 +21,11 @@
  * questions.
  *
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/vmCMSOperations.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"

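Note on the new include: cmsHeap.hpp is where CMSHeap is declared. A minimal sketch
of the assumed shape, inferred only from the calls in this diff and not copied from
the actual header: a GenCollectedHeap subclass with a typed singleton accessor, so
the call sites below only change their static type.

    // Sketch only (assumed): CMSHeap as declared in gc/cms/cmsHeap.hpp.
    class CMSHeap : public GenCollectedHeap {
    public:
      static CMSHeap* heap() {
        CollectedHeap* h = Universe::heap();
        assert(h != NULL, "Uninitialized heap");
        // The real accessor presumably also asserts the heap kind before casting.
        return (CMSHeap*) h;
      }
      // total_collections(), prepare_for_verify(), gc_cause(),
      // do_full_collection(), total_full_collections_completed(), etc.
      // are inherited from GenCollectedHeap.
    };
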
@@ -37,23 +38,23 @@
 //////////////////////////////////////////////////////////
 // Methods in abstract class VM_CMS_Operation
 //////////////////////////////////////////////////////////
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    GenCollectedHeap::heap()->prepare_for_verify();
+    CMSHeap::heap()->prepare_for_verify();
     Universe::verify();
   }
 }
 
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
     Universe::verify();

@@ -110,17 +111,17 @@
   HS_PRIVATE_CMS_INITMARK_BEGIN();
   GCIdMark gc_id_mark(_gc_id);
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
   _collector->_gc_timer_cm->register_gc_pause_end();
 

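Side note on GCCauseSetter, which appears in both pause operations: it is a
stack-allocated RAII helper that only needs a CollectedHeap*, so passing a CMSHeap*
instead of a GenCollectedHeap* compiles without further changes. Roughly, as a sketch
from memory (the real class lives in the shared heap code and may differ in detail):

    // RAII: set the GC cause for the duration of the pause, restore the
    // previous cause when the setter goes out of scope.
    class GCCauseSetter : public StackObj {
      CollectedHeap* _heap;
      GCCause::Cause _previous_cause;
    public:
      GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause)
        : _heap(heap), _previous_cause(heap->gc_cause()) {
        _heap->set_gc_cause(cause);
      }
      ~GCCauseSetter() {
        _heap->set_gc_cause(_previous_cause);
      }
    };
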
@@ -138,17 +139,17 @@
   HS_PRIVATE_CMS_REMARK_BEGIN();
   GCIdMark gc_id_mark(_gc_id);
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
   _collector->save_heap_summary();
   _collector->_gc_timer_cm->register_gc_pause_end();

@@ -160,50 +161,50 @@
 // GenCollectedHeap heap.
 void VM_GenCollectFullConcurrent::doit() {
   assert(Thread::current()->is_VM_thread(), "Should be VM thread");
   assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (_gc_count_before == gch->total_collections()) {
+  CMSHeap* heap = CMSHeap::heap();
+  if (_gc_count_before == heap->total_collections()) {
     // The "full" of do_full_collection call below "forces"
     // a collection; the second arg, 0, below ensures that
     // only the young gen is collected. XXX In the future,
     // we'll probably need to have something in this interface
     // to say do this only if we are sure we will not bail
     // out to a full collection in this attempt, but that's
     // for the future.
     assert(SafepointSynchronize::is_at_safepoint(),
       "We can only be executing this arm of if at a safepoint");
-    GCCauseSetter gccs(gch, _gc_cause);
-    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
+    GCCauseSetter gccs(heap, _gc_cause);
+    heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
   } // Else no need for a foreground young gc
-  assert((_gc_count_before < gch->total_collections()) ||
+  assert((_gc_count_before < heap->total_collections()) ||
          (GCLocker::is_active() /* gc may have been skipped */
-          && (_gc_count_before == gch->total_collections())),
+          && (_gc_count_before == heap->total_collections())),
          "total_collections() should be monotonically increasing");
 
   MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
-  if (gch->total_full_collections() == _full_gc_count_before) {
+  assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
+  if (heap->total_full_collections() == _full_gc_count_before) {
     // Nudge the CMS thread to start a concurrent collection.
     CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
   } else {
-    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
+    assert(_full_gc_count_before < heap->total_full_collections(), "Error");
     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
   }
 }
 
 bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
   Thread* thr = Thread::current();
   assert(thr != NULL, "Unexpected tid");
   if (!thr->is_Java_thread()) {
     assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    if (_gc_count_before != gch->total_collections()) {
+    CMSHeap* heap = CMSHeap::heap();
+    if (_gc_count_before != heap->total_collections()) {
       // No need to do a young gc, we'll just nudge the CMS thread
       // in the doit() method above, to be executed soon.
-      assert(_gc_count_before < gch->total_collections(),
+      assert(_gc_count_before < heap->total_collections(),
              "total_collections() should be monotonically increasing");
       return false;  // no need for foreground young gc
     }
   }
   return true;       // may still need foreground young gc

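For reference on how _gc_count_before and _full_gc_count_before get their values:
the operation is queued from a Java thread, which snapshots the collection counters
before handing the request to the VM thread. A sketch of that requesting side follows;
the wrapper name request_concurrent_full_gc is hypothetical and the constructor
argument order is assumed from the fields used above.

    // Sketch only: snapshot the counters under Heap_lock, then let the VM
    // thread run evaluate_at_safepoint()/doit() shown above.
    void request_concurrent_full_gc(GCCause::Cause cause) {
      unsigned int gc_count_before;
      unsigned int full_gc_count_before;
      {
        MutexLocker ml(Heap_lock);
        gc_count_before      = CMSHeap::heap()->total_collections();
        full_gc_count_before = CMSHeap::heap()->total_full_collections();
      }
      VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
      VMThread::execute(&op);  // blocks until doit_epilogue() below returns
    }
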
@@ -225,13 +226,13 @@
   // the completion count is monotonically increasing;
   // this will break for very long-running apps when the
   // count overflows and wraps around. XXX fix me !!!
   // e.g. at the rate of 1 full gc per ms, this could
   // overflow in about 1000 years.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
-      gch->total_full_collections_completed() <= _full_gc_count_before) {
+      heap->total_full_collections_completed() <= _full_gc_count_before) {
     // maybe we should change the condition to test _gc_cause ==
     // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
     // instead of _gc_cause != GCCause::_gc_locker
     assert(GCCause::is_user_requested_gc(_gc_cause),
            "the only way to get here if this was a System.gc()-induced GC");

@@ -243,10 +244,10 @@
     // while holding that lock.
     ThreadToNativeFromVM native(jt);
     MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
     // Either a concurrent or a stop-world full gc is sufficient
     // witness to our request.
-    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
+    while (heap->total_full_collections_completed() <= _full_gc_count_before) {
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
 }
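
The wait loop above depends on the completion side bumping
total_full_collections_completed() and notifying FullGCCount_lock once a concurrent
or stop-world full collection finishes. A sketch of that counterpart, with an assumed
shape and accessor name (the real bookkeeping lives in the heap/collector code):

    // Sketch only: the completion path that wakes up doit_epilogue().
    void signal_full_gc_completed(CMSHeap* heap) {
      MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      heap->update_full_collections_completed();  // assumed accessor name
      ml.notify_all();  // releases Java threads waiting in doit_epilogue()
    }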