src/share/vm/gc/g1/g1CollectedHeap.cpp

@@ -51,10 +51,11 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.hpp"
 #include "gc/shared/generationSpec.hpp"

@@ -1455,10 +1456,11 @@
 
   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
   gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
+  GCIdMark gc_id_mark;
   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
 
   SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
 

@@ -1481,11 +1483,11 @@
     // Timing
     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     {
-      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
+      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
       TraceCollectorStats tcs(g1mm()->full_collection_counters());
       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
 
       g1_policy()->record_full_collection_start();
 

@@ -3930,11 +3932,11 @@
 void G1CollectedHeap::log_gc_header() {
   if (!G1Log::fine()) {
     return;
   }
 
-  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
+  gclog_or_tty->gclog_stamp();
 
   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
 

@@ -3988,17 +3990,21 @@
     return false;
   }
 
   _gc_timer_stw->register_gc_start();
 
-  _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
 
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
 
   wait_for_root_region_scanning();
 
+  bool should_start_conc_mark = false;
+  {
+    GCIdMark gc_id_mark;
+    _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
+
   G1Log::update_level();
   print_heap_before_gc();
   trace_heap_before_gc(_gc_tracer_stw);
 
   verify_region_sets_optional();

@@ -4017,11 +4023,11 @@
   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
 
   // Record whether this pause is an initial mark. When the current
   // thread has completed its logging output and it's safe to signal
   // the CM thread, the flag's value in the policy has been reset.
-  bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
+    should_start_conc_mark = collector_state()->during_initial_mark_pause();
 
   // Inner scope for scope based logging, timers, and stats collection
   {
     EvacuationInfo evacuation_info;
 

@@ -4341,10 +4347,11 @@
     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
   }
   // It should now be safe to tell the concurrent mark thread to start
   // without its logging output interfering with the logging output
   // that came from the pause.
+  }
 
   if (should_start_conc_mark) {
     // CAUTION: after the doConcurrentMark() call below,
     // the concurrent marking thread(s) could be running
     // concurrently with us. Make sure that anything after
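
The hunks above restructure do_collection_pause_at_safepoint(): a new block guarded by GCIdMark now encloses the pause, and should_start_conc_mark is declared before that block (and only assigned inside it) so the decision outlives the GC-id scope, which closes before the concurrent mark thread is signalled. A minimal sketch of that hoisting pattern, with illustrative stand-in names:

// Sketch of the scoping pattern used above; GCIdScopeSketch stands in for
// GCIdMark and the actual pause work is elided.
struct GCIdScopeSketch {
  GCIdScopeSketch()  { /* assumed: publish a fresh GC id for this scope */ }
  ~GCIdScopeSketch() { /* assumed: restore the previous (outer) GC id   */ }
};

static bool during_initial_mark_pause_stub() { return true; }   // placeholder
static void do_concurrent_mark_stub() { /* logs under its own GC id later */ }

void collection_pause_sketch() {
  // Declared outside the scope so it is still usable after the id is dropped.
  bool should_start_conc_mark = false;
  {
    GCIdScopeSketch gc_id_mark;                     // pause logging uses this id
    // ... evacuation pause work, logging, report_gc_start/report_gc_end ...
    should_start_conc_mark = during_initial_mark_pause_stub();
  }  // GC-id scope ends before the concurrent mark thread is told to start
  if (should_start_conc_mark) {
    do_concurrent_mark_stub();
  }
}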

@@ -5563,24 +5570,22 @@
     // Serial reference processing...
     stats = rp->process_discovered_references(&is_alive,
                                               &keep_alive,
                                               &drain_queue,
                                               NULL,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   } else {
     // Parallel reference processing
     assert(rp->num_q() == no_of_gc_workers, "sanity");
     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
 
     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
     stats = rp->process_discovered_references(&is_alive,
                                               &keep_alive,
                                               &drain_queue,
                                               &par_task_executor,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   }
 
   _gc_tracer_stw->report_gc_reference_stats(stats);
 
   // We have completed copying any necessary live referent objects.
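
The last hunk applies the same idea to reference processing: process_discovered_references() no longer receives the STW tracer's gc_id, presumably because any phase timing or tracing it performs can read the ambient id established by GCIdMark. A hedged sketch of that call-shape change (the names and signature below are illustrative, not the actual ReferenceProcessor API):

// Illustrative only: dropping an explicit id parameter once an ambient,
// scope-managed id is available to the callee.
static unsigned int current_gc_id_stub() { return 42u; }  // stands in for the ambient id

// Before (sketch): process_discovered_references(..., timer, gc_id)
// After  (sketch): process_discovered_references(..., timer) -- the callee
// fetches the id itself when it needs to attach it to timing or trace events.
void process_discovered_references_sketch(/* STWGCTimer* timer */) {
  unsigned int gc_id = current_gc_id_stub();  // ambient id from the enclosing GCIdMark
  (void)gc_id;  // would be recorded with the reference-processing phase times
}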