src/share/vm/gc/g1/g1CollectedHeap.cpp


@@ -1765,19 +1765,16 @@
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
   _old_evac_stats("Old", OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
-  _heap_summary_sent(false),
   _in_cset_fast_test(),
   _dirty_cards_region_list(NULL),
   _worker_cset_start_region(NULL),
   _worker_cset_start_region_time_stamp(NULL),
   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
-  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
-  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
-  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
+  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
 
   _workers = new WorkGang("GC Thread", ParallelGCThreads,
                           /* are_GC_task_threads */true,
                           /* are_ConcurrentGC_threads */false);
   _workers->initialize_workers();

@@ -2314,56 +2311,10 @@
   // and it's waiting for a full GC to finish will be woken up. It is
   // waiting in VM_G1IncCollectionPause::doit_epilogue().
   FullGCCount_lock->notify_all();
 }
 
-void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
-  GCIdMarkAndRestore conc_gc_id_mark;
-  collector_state()->set_concurrent_cycle_started(true);
-  _gc_timer_cm->register_gc_start(start_time);
-
-  _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
-  trace_heap_before_gc(_gc_tracer_cm);
-  _cmThread->set_gc_id(GCId::current());
-}
-
-void G1CollectedHeap::register_concurrent_cycle_end() {
-  if (collector_state()->concurrent_cycle_started()) {
-    GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
-    if (_cm->has_aborted()) {
-      _gc_tracer_cm->report_concurrent_mode_failure();
-
-      // ConcurrentGCTimer will be ended as well.
-      _cm->register_concurrent_gc_end_and_stop_timer();
-    } else {
-      _gc_timer_cm->register_gc_end();
-    }
-
-    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
-
-    // Clear state variables to prepare for the next concurrent cycle.
-    collector_state()->set_concurrent_cycle_started(false);
-    _heap_summary_sent = false;
-  }
-}
-
-void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
-  if (collector_state()->concurrent_cycle_started()) {
-    // This function can be called when:
-    //  the cleanup pause is run
-    //  the concurrent cycle is aborted before the cleanup pause.
-    //  the concurrent cycle is aborted after the cleanup pause,
-    //   but before the concurrent cycle end has been registered.
-    // Make sure that we only send the heap information once.
-    if (!_heap_summary_sent) {
-      GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
-      trace_heap_after_gc(_gc_tracer_cm);
-      _heap_summary_sent = true;
-    }
-  }
-}
-
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   assert_heap_not_locked();
 
   uint gc_count_before;
   uint old_marking_count_before;

@@ -2844,11 +2795,12 @@
 
   size_t eden_capacity_bytes =
     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
 
   VirtualSpaceSummary heap_summary = create_heap_space_summary();
-  return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions());
+  return G1HeapSummary(heap_summary, (Heap_lock->owned_by_self() ? used() : used_unlocked()),
+                       eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions());
 }
 
 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
   return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
                        stats->unused(), stats->used(), stats->region_end_waste(),

@@ -2862,11 +2814,10 @@
 
   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
   gc_tracer->report_metaspace_summary(when, metaspace_summary);
 }
 
-
 G1CollectedHeap* G1CollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
   return (G1CollectedHeap*)heap;

@@ -3231,11 +3182,11 @@
 
     if (collector_state()->during_initial_mark_pause()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
       increment_old_marking_cycles_started();
-      register_concurrent_cycle_start(_gc_timer_stw->gc_start());
+      _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
     }
 
     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
 
     GCTraceCPUTime tcpu;