src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

*** 927,936 ****
--- 927,938 ----
    _gc_timer_cm->register_gc_start();
  
    _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
  
    _g1h->trace_heap_before_gc(_gc_tracer_cm);
+   // Record start, but take no time
+   TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::CycleStart, _g1h->gc_cause());
  }
  
  void G1ConcurrentMark::concurrent_cycle_end() {
    _g1h->collector_state()->set_clearing_next_bitmap(false);
*** 942,951 ****
--- 944,955 ----
    }
  
    _gc_timer_cm->register_gc_end();
  
    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
+   // Record end, but take no time
+   TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::CycleEnd, _g1h->gc_cause());
  }
  
  void G1ConcurrentMark::mark_from_roots() {
    _restart_for_overflow = false;
*** 1119,1128 ****
--- 1123,1134 ----
    // have ended up here as the Remark VM operation has been scheduled already.
    if (has_aborted()) {
      return;
    }
  
+   TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::Remark, _g1h->gc_cause());
+ 
    G1Policy* g1p = _g1h->g1_policy();
    g1p->record_concurrent_mark_remark_start();
  
    double start = os::elapsedTime();
*** 1335,1344 ****
--- 1341,1352 ----
    // If a full collection has happened, we shouldn't do this.
    if (has_aborted()) {
      return;
    }
  
+   TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::Cleanup, _g1h->gc_cause());
+ 
    G1Policy* g1p = _g1h->g1_policy();
    g1p->record_concurrent_mark_cleanup_start();
  
    double start = os::elapsedTime();
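
Taken together, the four hunks bracket the concurrent marking cycle with scoped stats objects: a TraceConcMemoryManagerStats is constructed on the stack at the start of each phase (cycle start, remark, cleanup, cycle end), so the event is reported as a side effect of entering the scope, and only after any has_aborted() early-out. Below is a minimal sketch of that scoped-recording pattern using a hypothetical stand-in class; ScopedConcGCEvent and its members are illustrative only, not the real HotSpot API.

#include <cstdio>

// Hypothetical stand-in for TraceConcMemoryManagerStats: constructing one
// records a concurrent-GC event immediately. No timing is taken, matching
// the "Record start, but take no time" comments in the patch.
class ScopedConcGCEvent {
public:
  enum Stage { CycleStart, Remark, Cleanup, CycleEnd };

  explicit ScopedConcGCEvent(Stage stage) {
    // Stand-in for the real reporting to the memory manager.
    printf("concurrent GC event recorded: stage=%d\n", stage);
  }
};

void remark_phase(bool aborted) {
  // Mirrors the patch: bail out first, so an aborted cycle records nothing
  // for this phase; only then construct the scoped stats object.
  if (aborted) {
    return;
  }
  ScopedConcGCEvent tms(ScopedConcGCEvent::Remark);
  // ... remark work would run here ...
}

int main() {
  remark_phase(false);  // records the Remark event
  remark_phase(true);   // records nothing
  return 0;
}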