--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp	2017-08-03 16:11:03.505643260 -0700
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp	2017-08-03 16:11:03.401643264 -0700
@@ -1260,7 +1260,7 @@
     assert(num_free_regions() == 0, "we should not have added any free regions");
     rebuild_region_sets(false /* free_list_only */);
 
-    ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q(), ref_processor_stw()->processing_is_mt());
+    ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q());
 
     // Enqueue any discovered reference objects that have
     // not been removed from the discovered lists.
@@ -1671,7 +1671,9 @@
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
   CollectedHeap(),
   _collector_policy(collector_policy),
-  _g1_policy(create_g1_policy()),
+  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
+  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
+  _g1_policy(create_g1_policy(_gc_timer_stw)),
   _collection_set(this, _g1_policy),
   _dirty_card_queue_set(false),
   _is_alive_closure_cm(this),
@@ -1698,9 +1700,7 @@
   _expand_heap_after_alloc_failure(true),
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
-  _in_cset_fast_test(),
-  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
-  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
+  _in_cset_fast_test() {
 
   _workers = new WorkGang("GC Thread", ParallelGCThreads,
                           /* are_GC_task_threads */true,
@@ -2054,9 +2054,6 @@
                            &_is_alive_closure_stw);
                                 // is alive closure
                                 // (for efficiency/performance)
-  _ref_phase_times = new ReferenceProcessorPhaseTimes(_gc_timer_stw,
-                                                      ParallelGCThreads,
-                                                      mt_processing);
 }
 
 CollectorPolicy* G1CollectedHeap::collector_policy() const {
@@ -4322,7 +4319,7 @@
   // Setup the soft refs policy...
   rp->setup_policy(false);
 
-  ref_phase_times()->reset();
+  ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
 
   ReferenceProcessorStats stats;
   if (!rp->processing_is_mt()) {
@@ -4331,7 +4328,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               NULL,
-                                              ref_phase_times());
+                                              pt);
   } else {
     uint no_of_gc_workers = workers()->active_workers();
 
@@ -4345,7 +4342,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               &par_task_executor,
-                                              ref_phase_times());
+                                              pt);
   }
 
   _gc_tracer_stw->report_gc_reference_stats(stats);
@@ -4364,11 +4361,13 @@
   ReferenceProcessor* rp = _ref_processor_stw;
   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
 
+  ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
+
   // Now enqueue any remaining on the discovered lists on to
   // the pending list.
   if (!rp->processing_is_mt()) {
     // Serial reference processing...
-    rp->enqueue_discovered_references(NULL, ref_phase_times());
+    rp->enqueue_discovered_references(NULL, pt);
   } else {
     // Parallel reference enqueueing
 
@@ -4379,7 +4378,7 @@
            n_workers, rp->max_num_q());
 
     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
-    rp->enqueue_discovered_references(&par_task_executor, ref_phase_times());
+    rp->enqueue_discovered_references(&par_task_executor, pt);
   }
 
   rp->verify_no_references_recorded();