
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

rev 9733 : [mq]: webrev.00
rev 9734 : [mq]: webrev.01

@@ -2740,13 +2740,15 @@
   _collector(collector), _title(title), _trace_time(title) {
 
   _collector->resetYields();
   _collector->resetTimer();
   _collector->startTimer();
+  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
 }
 
 CMSPhaseAccounting::~CMSPhaseAccounting() {
+  _collector->gc_timer_cm()->register_gc_concurrent_end();
   _collector->stopTimer();
   log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_seconds(_collector->timerTicks()));
   log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
 }
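
This hunk moves registration of the concurrent timer events into CMSPhaseAccounting itself: the constructor now calls register_gc_concurrent_start(title) and the destructor calls register_gc_concurrent_end(), so every concurrent phase that creates a CMSPhaseAccounting object is bracketed automatically. A minimal standalone sketch of that RAII pattern follows; ConcurrentGCTimer and PhaseAccounting here are simplified stand-ins for illustration, not the HotSpot classes.

#include <cstdio>

// Simplified stand-in for the concurrent GC timer: it only logs the
// start/end events that the real timer would record.
class ConcurrentGCTimer {
 public:
  void register_concurrent_start(const char* title) {
    std::printf("concurrent phase start: %s\n", title);
  }
  void register_concurrent_end() {
    std::printf("concurrent phase end\n");
  }
};

// Scoped phase-accounting helper: the constructor opens the concurrent
// phase, the destructor closes it, so every exit from the scope records
// the matching end event.
class PhaseAccounting {
 public:
  PhaseAccounting(ConcurrentGCTimer* timer, const char* title) : _timer(timer) {
    _timer->register_concurrent_start(title);
  }
  ~PhaseAccounting() {
    _timer->register_concurrent_end();
  }
 private:
  ConcurrentGCTimer* _timer;
};

int main() {
  ConcurrentGCTimer timer;
  {
    PhaseAccounting pa(&timer, "Concurrent Mark");
    // ... phase work runs here ...
  }   // ~PhaseAccounting(): the end event is recorded here
  return 0;
}
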
 

@@ -2906,11 +2908,10 @@
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
   // a safepoint is indeed in progress as a young generation
   // stop-the-world GC happens even as we mark in this generation.
   assert(_collectorState == Marking, "inconsistent state?");
-  _gc_timer_cm->register_gc_concurrent_start("Concurrent Mark");
   check_correct_thread_executing();
   verify_overflow_empty();
 
   // Weak ref discovery note: We may be discovering weak
   // refs in this generation concurrent (but interleaved) with

@@ -2926,11 +2927,10 @@
     assert(_foregroundGCIsActive, "internal state inconsistency");
     assert(_restart_addr == NULL,  "foreground will restart from scratch");
     log_debug(gc)("bailing out to foreground collection");
   }
   verify_overflow_empty();
-  _gc_timer_cm->register_gc_concurrent_end();
   return res;
 }
 
 bool CMSCollector::markFromRootsWork() {
   // iterate over marked bits in bit map, doing a full scan and mark

@@ -5293,11 +5293,10 @@
 }
 #endif
 
 void CMSCollector::sweep() {
   assert(_collectorState == Sweeping, "just checking");
-  _gc_timer_cm->register_gc_concurrent_start("Concurrent Sweep");
   check_correct_thread_executing();
   verify_work_stacks_empty();
   verify_overflow_empty();
   increment_sweep_count();
   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());

@@ -5374,11 +5373,10 @@
   // the flag will be set again when a young collection is
   // attempted.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
   gch->update_full_collections_completed(_collection_count_start);
-  _gc_timer_cm->register_gc_concurrent_end();
 }
 
 // FIX ME!!! Looks like this belongs in CFLSpace, with
 // CMSGen merely delegating to it.
 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
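
With the timer registration folded into CMSPhaseAccounting, the explicit _gc_timer_cm->register_gc_concurrent_start()/register_gc_concurrent_end() pairs at the top and bottom of markFromRoots() and sweep() become redundant, which is what the four hunks above remove; each of those functions already sets up a CMSPhaseAccounting for its phase. The sketch below, reusing the stand-in ConcurrentGCTimer and PhaseAccounting types from the earlier sketch (do_marking_work() is a hypothetical placeholder), contrasts the manual pairing with the scoped form, which also covers any early-return path.

// Hypothetical phase body, present only so the sketch compiles.
static bool do_marking_work() { return true; }

// Manual pairing: every return path has to remember the matching end call.
bool mark_from_roots_manual(ConcurrentGCTimer* timer) {
  timer->register_concurrent_start("Concurrent Mark");
  bool res = do_marking_work();
  timer->register_concurrent_end();
  return res;
}

// Scoped form: the destructor records the end event on any exit path,
// including early returns added later.
bool mark_from_roots_scoped(ConcurrentGCTimer* timer) {
  PhaseAccounting pa(timer, "Concurrent Mark");
  return do_marking_work();
}
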

@@ -5485,10 +5483,11 @@
     assert(_collectorState == Idling, "The state should only change"
       " because the foreground collector has finished the collection");
     return;
   }
 
+  {
   // Clear the mark bitmap (no grey objects to start with)
   // for the next cycle.
   GCTraceCPUTime tcpu;
   CMSPhaseAccounting cmspa(this, "Concurrent Reset");
 

@@ -5525,10 +5524,11 @@
   // Because only the full (i.e., concurrent mode failure) collections
   // are being measured for gc overhead limits, clean the "near" flag
   // and count.
   size_policy()->reset_gc_overhead_limit_count();
   _collectorState = Idling;
+  }
 
   register_gc_end();
 }
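
The last two hunks wrap the body of the concurrent reset path in a new block. Because the CMSPhaseAccounting destructor now ends the concurrent timer event, the added braces make that destructor run before register_gc_end() at the bottom of the function, so the phase-end event is recorded inside the GC cycle rather than after it. A small sketch of that ordering, again using the stand-in types from above and a hypothetical register_gc_end_sketch() in place of the real call:

// Hypothetical stand-in for the cycle-wide end registration.
static void register_gc_end_sketch() { std::puts("gc cycle end"); }

void concurrent_reset_sketch(ConcurrentGCTimer* timer) {
  {
    PhaseAccounting pa(timer, "Concurrent Reset");
    // ... clear the mark bitmap, reset state to Idling ...
  }  // ~PhaseAccounting(): the concurrent-end event is recorded here

  // Without the inner braces, pa would live until the end of the
  // function and the phase-end event would be recorded after this call.
  register_gc_end_sketch();
}
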
 
 // Same as above but for STW paths