src/share/vm/gc/g1/g1ConcurrentMark.cpp

@@ -439,11 +439,12 @@
   _has_overflown(false),
   _concurrent(false),
   _has_aborted(false),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),
-  _concurrent_phase_status(ConcPhaseNotStarted),
+  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
+  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 
   // _verbose_level set below
 
   _init_times(),
   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
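
Note: the concurrent GC timer and tracer become fields owned by G1ConcurrentMark itself, replacing the removed _concurrent_phase_status word. They are allocated with ResourceObj's C-heap placement new, so the storage is tagged for native-memory tracking and outlives any resource area. A minimal sketch of the allocation idiom used above (the field declarations are assumed to live in g1ConcurrentMark.hpp; they are not part of this page):

    // Assumed declarations in g1ConcurrentMark.hpp:
    //   ConcurrentGCTimer* _gc_timer_cm;
    //   G1OldTracer*       _gc_tracer_cm;

    // ResourceObj's placement new with C_HEAP draws from the C heap and
    // tags the allocation with the mtGC NMT category:
    ConcurrentGCTimer* timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();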

@@ -1005,48 +1006,28 @@
     // mainly used for sanity checking.
     root_regions()->scan_finished();
   }
 }
 
-void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
-  uint old_val = 0;
-  do {
-    old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
-  } while (old_val != ConcPhaseNotStarted);
-  _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
+void G1ConcurrentMark::concurrent_cycle_start() {
+  _gc_timer_cm->register_gc_start();
+
+  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
+
+  _g1h->trace_heap_before_gc(_gc_tracer_cm);
 }
 
-void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
-  if (_concurrent_phase_status == ConcPhaseNotStarted) {
-    return;
-  }
+void G1ConcurrentMark::concurrent_cycle_end() {
+  _g1h->trace_heap_after_gc(_gc_tracer_cm);
 
-  uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
-  if (old_val == ConcPhaseStarted) {
-    _g1h->gc_timer_cm()->register_gc_concurrent_end();
-    // If 'end_timer' is true, we came here to end timer which needs concurrent phase ended.
-    // We need to end it before changing the status to 'ConcPhaseNotStarted' to prevent
-    // starting a new concurrent phase by 'ConcurrentMarkThread'.
-    if (end_timer) {
-      _g1h->gc_timer_cm()->register_gc_end();
-    }
-    old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
-    assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
-  } else {
-    do {
-      // Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
-      os::naked_short_sleep(1);
-    } while (_concurrent_phase_status != ConcPhaseNotStarted);
+  if (has_aborted()) {
+    _gc_tracer_cm->report_concurrent_mode_failure();
   }
-}
 
-void G1ConcurrentMark::register_concurrent_phase_end() {
-  register_concurrent_phase_end_common(false);
-}
+  _gc_timer_cm->register_gc_end();
 
-void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
-  register_concurrent_phase_end_common(true);
+  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 }
 
 void G1ConcurrentMark::markFromRoots() {
   // we might be tempted to assert that:
   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),

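Note: the CAS-based ConcPhaseNotStarted/ConcPhaseStarted/ConcPhaseStopping state machine is collapsed into a plain start/end pair. Bracketing the whole cycle without atomics is safe because a single concurrent mark thread drives it. A hedged sketch of the expected call shape from that driver (everything here except concurrent_cycle_start/concurrent_cycle_end is an assumption, not shown on this page):

    // Sketch of the driving sequence on the concurrent mark thread:
    _cm->concurrent_cycle_start();  // register_gc_start, report_gc_start, heap-before trace
    _cm->markFromRoots();           // concurrent marking; may set _has_aborted
    // ... remark and cleanup pauses run in between ...
    _cm->concurrent_cycle_end();    // heap-after trace, concurrent-mode-failure report if aborted,
                                    // then register_gc_end and report_gc_end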
@@ -1122,11 +1103,11 @@
     // Clear the marking state because we will be restarting
     // marking due to overflowing the global mark stack.
     reset_marking_state();
   } else {
     {
-      GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
+      GCTraceTime(Debug, gc) trace("Aggregate Data", _gc_timer_cm);
 
       // Aggregate the per-task counting data that we have accumulated
       // while marking.
       aggregate_count_data();
     }
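
Note: GCTraceTime is a scoped RAII helper; its constructor records the phase start on the supplied timer and its destructor records the end, so the braces delimit the measured region. The only change in this hunk (and the similar ones below) is which timer is charged: the marker-owned _gc_timer_cm instead of one fetched through g1h. Roughly (a usage sketch, not new behavior):

    {
      GCTraceTime(Debug, gc) trace("Aggregate Data", _gc_timer_cm);
      aggregate_count_data();  // everything inside the braces is timed
    } // destructor logs the elapsed time and closes the phase on _gc_timer_cm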

@@ -1161,11 +1142,11 @@
   _remark_times.add((now - start) * 1000.0);
 
   g1p->record_concurrent_mark_remark_end();
 
   G1CMIsAliveClosure is_alive(g1h);
-  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
+  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
 }
 
 // Base class of the closures that finalize and verify the
 // liveness counting data.
 class G1CMCountDataClosureBase: public HeapRegionClosure {

@@ -1750,12 +1731,10 @@
 
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
   g1h->g1mm()->update_sizes();
   g1h->allocation_context_stats().update_after_mark();
-
-  g1h->trace_heap_after_concurrent_cycle();
 }
 
 void G1ConcurrentMark::completeCleanup() {
   if (has_aborted()) return;
 

@@ -2043,11 +2022,11 @@
   G1CMIsAliveClosure g1_is_alive(g1h);
 
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
-    GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm());
+    GCTraceTime(Debug, gc) trace("Reference Processing", _gc_timer_cm);
 
     ReferenceProcessor* rp = g1h->ref_processor_cm();
 
     // See the comment in G1CollectedHeap::ref_processing_init()
     // about how reference processing currently works in G1.

@@ -2100,12 +2079,12 @@
     const ReferenceProcessorStats& stats =
         rp->process_discovered_references(&g1_is_alive,
                                           &g1_keep_alive,
                                           &g1_drain_mark_stack,
                                           executor,
-                                          g1h->gc_timer_cm());
-    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
+                                          _gc_timer_cm);
+    _gc_tracer_cm->report_gc_reference_stats(stats);
 
     // The do_oop work routines of the keep_alive and drain_marking_stack
     // oop closures will set the has_overflown flag if we overflow the
     // global marking stack.
 

@@ -2133,28 +2112,28 @@
 
   assert(_markStack.isEmpty(), "Marking should have completed");
 
   // Unload Klasses, String, Symbols, Code Cache, etc.
   {
-    GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
+    GCTraceTime(Debug, gc) trace("Unloading", _gc_timer_cm);
 
     if (ClassUnloadingWithConcurrentMark) {
       bool purged_classes;
 
       {
-        GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
+        GCTraceTime(Trace, gc) trace("System Dictionary Unloading", _gc_timer_cm);
         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
       }
 
       {
-        GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
+        GCTraceTime(Trace, gc) trace("Parallel Unloading", _gc_timer_cm);
         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
       }
     }
 
     if (G1StringDedup::is_enabled()) {
-      GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
+      GCTraceTime(Trace, gc) trace("String Deduplication Unlink", _gc_timer_cm);
       G1StringDedup::unlink(&g1_is_alive);
     }
   }
 }
 

@@ -2271,11 +2250,11 @@
 void G1ConcurrentMark::checkpointRootsFinalWork() {
   ResourceMark rm;
   HandleMark   hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
+  GCTraceTime(Debug, gc) trace("Finalize Marking", _gc_timer_cm);
 
   g1h->ensure_parsability(false);
 
   // this is remark, so we'll use up all active threads
   uint active_workers = g1h->workers()->active_workers();

@@ -2627,14 +2606,10 @@
   // This can be called either during or outside marking, we'll read
   // the expected_active value from the SATB queue set.
   satb_mq_set.set_active_all_threads(
                                  false, /* new active value */
                                  satb_mq_set.is_active() /* expected_active */);
-
-  _g1h->trace_heap_after_concurrent_cycle();
-
-  _g1h->register_concurrent_cycle_end();
 }
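
Note: abort() no longer traces the heap or registers the cycle end itself; both duties are consolidated in concurrent_cycle_end(), which checks has_aborted() and reports a concurrent mode failure before closing the timer. A hedged sketch of the resulting ordering on the abort path (the caller shown is an assumption):

    _cm->abort();                 // sets _has_aborted, resets SATB queue activity
    // ... later, on the concurrent mark thread ...
    _cm->concurrent_cycle_end();  // trace_heap_after_gc, report_concurrent_mode_failure,
                                  // register_gc_end, report_gc_end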
 
 static void print_ms_time_info(const char* prefix, const char* name,
                                NumberSeq& ns) {
   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",