--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp 2017-08-03 16:11:02.173643307 -0700
+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp 2017-08-03 16:11:02.069643310 -0700
@@ -5185,7 +5185,7 @@
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, _span, &_markBitMap,
                                                           &_markStack, &cmsKeepAliveClosure,
                                                           false /* !preclean */);
-  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q(), rp->processing_is_mt());
+  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
   {
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);

--- old/src/share/vm/gc/cms/parNewGeneration.cpp 2017-08-03 16:11:02.889643282 -0700
+++ new/src/share/vm/gc/cms/parNewGeneration.cpp 2017-08-03 16:11:02.785643285 -0700
@@ -984,7 +984,7 @@
   // Can the mt_degree be set later (at run_task() time would be best)?
   rp->set_active_mt_degree(active_workers);
   ReferenceProcessorStats stats;
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q(), rp->processing_is_mt());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
   if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2017-08-03 16:11:03.505643260 -0700
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2017-08-03 16:11:03.401643264 -0700
@@ -1260,7 +1260,7 @@
       assert(num_free_regions() == 0, "we should not have added any free regions");
       rebuild_region_sets(false /* free_list_only */);

-      ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q(), ref_processor_stw()->processing_is_mt());
+      ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q());

       // Enqueue any discovered reference objects that have
       // not been removed from the discovered lists.
@@ -1671,7 +1671,9 @@
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
   CollectedHeap(),
   _collector_policy(collector_policy),
-  _g1_policy(create_g1_policy()),
+  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
+  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
+  _g1_policy(create_g1_policy(_gc_timer_stw)),
   _collection_set(this, _g1_policy),
   _dirty_card_queue_set(false),
   _is_alive_closure_cm(this),
@@ -1698,9 +1700,7 @@
   _expand_heap_after_alloc_failure(true),
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
-  _in_cset_fast_test(),
-  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
-  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
+  _in_cset_fast_test() {

   _workers = new WorkGang("GC Thread", ParallelGCThreads,
                           /* are_GC_task_threads */true,
@@ -2054,9 +2054,6 @@
                            &_is_alive_closure_stw);
                                 // is alive closure
                                 // (for efficiency/performance)
-  _ref_phase_times = new ReferenceProcessorPhaseTimes(_gc_timer_stw,
-                                                      ParallelGCThreads,
-                                                      mt_processing);
 }

 CollectorPolicy* G1CollectedHeap::collector_policy() const {
@@ -4322,7 +4319,7 @@
   // Setup the soft refs policy...
   rp->setup_policy(false);

-  ref_phase_times()->reset();
+  ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();

   ReferenceProcessorStats stats;
   if (!rp->processing_is_mt()) {
@@ -4331,7 +4328,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               NULL,
-                                              ref_phase_times());
+                                              pt);
   } else {
     uint no_of_gc_workers = workers()->active_workers();

@@ -4345,7 +4342,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               &par_task_executor,
-                                              ref_phase_times());
+                                              pt);
   }

   _gc_tracer_stw->report_gc_reference_stats(stats);
@@ -4364,11 +4361,13 @@
   ReferenceProcessor* rp = _ref_processor_stw;
   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");

+  ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
+
   // Now enqueue any remaining on the discovered lists on to
   // the pending list.
   if (!rp->processing_is_mt()) {
     // Serial reference processing...
-    rp->enqueue_discovered_references(NULL, ref_phase_times());
+    rp->enqueue_discovered_references(NULL, pt);
   } else {
     // Parallel reference enqueueing

@@ -4379,7 +4378,7 @@
                                            n_workers,
                                            rp->max_num_q());
     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
-    rp->enqueue_discovered_references(&par_task_executor, ref_phase_times());
+    rp->enqueue_discovered_references(&par_task_executor, pt);
   }

   rp->verify_no_references_recorded();
--- old/src/share/vm/gc/g1/g1CollectedHeap.hpp 2017-08-03 16:11:04.229643235 -0700
+++ new/src/share/vm/gc/g1/g1CollectedHeap.hpp 2017-08-03 16:11:04.097643240 -0700
@@ -298,7 +298,7 @@
                                                          size_t size,
                                                          size_t translation_factor);

-  static G1Policy* create_g1_policy();
+  static G1Policy* create_g1_policy(STWGCTimer* gc_timer);

   void trace_heap(GCWhen::Type when, const GCTracer* tracer);

@@ -370,6 +370,10 @@
   G1EdenRegions _eden;
   G1SurvivorRegions _survivor;

+  STWGCTimer* _gc_timer_stw;
+
+  G1NewTracer* _gc_tracer_stw;
+
   // The current policy object for the collector.
   G1Policy* _g1_policy;
   G1HeapSizingPolicy* _heap_sizing_policy;
@@ -901,12 +905,6 @@
   // The (stw) reference processor...
   ReferenceProcessor* _ref_processor_stw;

-  ReferenceProcessorPhaseTimes* _ref_phase_times;
-
-  STWGCTimer* _gc_timer_stw;
-
-  G1NewTracer* _gc_tracer_stw;
-
   // During reference object discovery, the _is_alive_non_header
   // closure (if non-null) is applied to the referent object to
   // determine whether the referent is live. If so then the
@@ -1025,8 +1023,6 @@
   // The STW reference processor....
   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

-  ReferenceProcessorPhaseTimes* ref_phase_times() const { return _ref_phase_times; }
-
   G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }

   // The Concurrent Marking reference processor...
--- old/src/share/vm/gc/g1/g1CollectedHeap_ext.cpp 2017-08-03 16:11:04.857643213 -0700
+++ new/src/share/vm/gc/g1/g1CollectedHeap_ext.cpp 2017-08-03 16:11:04.741643217 -0700
@@ -28,6 +28,8 @@
 #include "gc/g1/g1ParScanThreadState.hpp"
 #include "gc/g1/heapRegion.inline.hpp"

+class STWGCTimer;
+
 bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
                                                     jlong* totals,
                                                     jbyte* accuracy,
@@ -40,6 +42,6 @@
   return new HeapRegion(hrs_index, bot(), mr);
 }

-G1Policy* G1CollectedHeap::create_g1_policy() {
-  return new G1DefaultPolicy();
+G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) {
+  return new G1DefaultPolicy(gc_timer);
 }
--- old/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2017-08-03 16:11:05.485643191 -0700
+++ new/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2017-08-03 16:11:05.353643196 -0700
@@ -1718,7 +1718,7 @@
     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
     rp->set_active_mt_degree(active_workers);

-    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q(), rp->processing_is_mt());
+    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());

     // Process the weak references.
     const ReferenceProcessorStats& stats =
--- old/src/share/vm/gc/g1/g1DefaultPolicy.cpp 2017-08-03 16:11:06.145643168 -0700
+++ new/src/share/vm/gc/g1/g1DefaultPolicy.cpp 2017-08-03 16:11:06.045643172 -0700
@@ -47,7 +47,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/pair.hpp"

-G1DefaultPolicy::G1DefaultPolicy() :
+G1DefaultPolicy::G1DefaultPolicy(STWGCTimer* gc_timer) :
   _predictor(G1ConfidencePercent / 100.0),
   _analytics(new G1Analytics(&_predictor)),
   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
@@ -63,7 +63,7 @@
   _initial_mark_to_mixed(),
   _collection_set(NULL),
   _g1(NULL),
-  _phase_times(new G1GCPhaseTimes(ParallelGCThreads)),
+  _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
   _tenuring_threshold(MaxTenuringThreshold),
   _max_survivor_regions(0),
   _survivors_age_table(true),
--- old/src/share/vm/gc/g1/g1DefaultPolicy.hpp 2017-08-03 16:11:06.773643147 -0700
+++ new/src/share/vm/gc/g1/g1DefaultPolicy.hpp 2017-08-03 16:11:06.673643150 -0700
@@ -44,6 +44,7 @@
 class G1SurvivorRegions;
 class G1YoungGenSizer;
 class GCPolicyCounters;
+class STWGCTimer;

 class G1DefaultPolicy: public G1Policy {
  private:
@@ -264,7 +265,7 @@

   void abort_time_to_mixed_tracking();
 public:
-  G1DefaultPolicy();
+  G1DefaultPolicy(STWGCTimer* gc_timer);

   virtual ~G1DefaultPolicy();

--- old/src/share/vm/gc/g1/g1GCPhaseTimes.cpp 2017-08-03 16:11:07.445643123 -0700
+++ new/src/share/vm/gc/g1/g1GCPhaseTimes.cpp 2017-08-03 16:11:07.345643127 -0700
@@ -37,10 +37,11 @@

 static const char* Indents[5] = {"", "  ", "    ", "      ", "        "};

-G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
+G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
   _max_gc_threads(max_gc_threads),
   _gc_start_counter(0),
-  _gc_pause_time_ms(0.0)
+  _gc_pause_time_ms(0.0),
+  _ref_phase_times((GCTimer*)gc_timer, max_gc_threads)
 {
   assert(max_gc_threads > 0, "Must have some GC threads");

@@ -152,6 +153,8 @@
       _gc_par_phases[i]->reset();
     }
   }
+
+  _ref_phase_times.reset();
 }

 void G1GCPhaseTimes::note_gc_start() {
@@ -285,13 +288,15 @@
 }

 void G1GCPhaseTimes::debug_time_for_reference(const char* name, double value) const {
-  Log(gc, phases) log;
-  Log(gc, phases, ref) log2;
-  if ((log.is_level(LogLevel::Debug) && !log2.is_level(LogLevel::Debug)) ||
-      (log.is_level(LogLevel::Debug) && log2.is_level(LogLevel::Debug))) {
-    log.debug_stream()->print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
-  } else if (!log.is_level(LogLevel::Debug) && log2.is_level(LogLevel::Debug)) {
-    log2.debug_stream()->print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
+  LogTarget(Debug, gc, phases) lt;
+  LogTarget(Debug, gc, phases, ref) lt2;
+
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
+  } else if (lt2.is_enabled()) {
+    LogStream ls(lt2);
+    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
   }
 }

@@ -382,7 +387,7 @@
   trace_phase(_gc_par_phases[PreserveCMReferents]);

   debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
-  G1CollectedHeap::heap()->ref_phase_times()->print_all_references(2, false);
+  _ref_phase_times.print_all_references(2, false);

   if (G1StringDedup::is_enabled()) {
     debug_time("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
@@ -399,7 +404,7 @@
   }

   debug_time_for_reference("Reference Enqueuing", _cur_ref_enq_time_ms);
-  G1CollectedHeap::heap()->ref_phase_times()->print_enqueue_phase(2, false);
+  _ref_phase_times.print_enqueue_phase(2, false);

   debug_time("Merge Per-Thread State", _recorded_merge_pss_time_ms);
   debug_time("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
--- old/src/share/vm/gc/g1/g1GCPhaseTimes.hpp 2017-08-03 16:11:08.045643102 -0700
+++ new/src/share/vm/gc/g1/g1GCPhaseTimes.hpp 2017-08-03 16:11:07.945643106 -0700
@@ -25,11 +25,13 @@
 #ifndef SHARE_VM_GC_G1_G1GCPHASETIMES_HPP
 #define SHARE_VM_GC_G1_G1GCPHASETIMES_HPP

+#include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "logging/logLevel.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/macros.hpp"

 class LineBuffer;
+class STWGCTimer;

 template <class T> class WorkerDataArray;

@@ -151,6 +153,8 @@
   double _cur_verify_before_time_ms;
   double _cur_verify_after_time_ms;

+  ReferenceProcessorPhaseTimes _ref_phase_times;
+
   double worker_time(GCParPhases phase, uint worker);
   void note_gc_end();
   void reset();
@@ -175,7 +179,7 @@
   void print_other(double accounted_ms) const;

  public:
-  G1GCPhaseTimes(uint max_gc_threads);
+  G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads);
   void note_gc_start();
   void print();

@@ -348,6 +352,8 @@
   double fast_reclaim_humongous_time_ms() {
     return _cur_fast_reclaim_humongous_time_ms;
   }
+
+  ReferenceProcessorPhaseTimes* ref_phase_times() { return &_ref_phase_times; }
 };

 class G1GCParPhaseTimesTracker : public StackObj {
--- old/src/share/vm/gc/g1/g1MarkSweep.cpp 2017-08-03 16:11:08.729643078 -0700
+++ new/src/share/vm/gc/g1/g1MarkSweep.cpp 2017-08-03 16:11:08.593643083 -0700
@@ -149,7 +149,7 @@
     assert(rp == g1h->ref_processor_stw(), "Sanity");
     rp->setup_policy(clear_all_softrefs);

-    ReferenceProcessorPhaseTimes pt(gc_timer(), rp->num_q(), rp->processing_is_mt());
+    ReferenceProcessorPhaseTimes pt(gc_timer(), rp->num_q());

     const ReferenceProcessorStats& stats =
       rp->process_discovered_references(&GenMarkSweep::is_alive,
--- old/src/share/vm/gc/parallel/psMarkSweep.cpp 2017-08-03 16:11:09.417643054 -0700
+++ new/src/share/vm/gc/parallel/psMarkSweep.cpp 2017-08-03 16:11:09.317643058 -0700
@@ -254,7 +254,7 @@
     DerivedPointerTable::update_pointers();
 #endif

-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q(), ref_processor()->processing_is_mt());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());

     ref_processor()->enqueue_discovered_references(NULL, &pt);

@@ -532,7 +532,7 @@
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q(), ref_processor()->processing_is_mt());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
--- old/src/share/vm/gc/parallel/psParallelCompact.cpp 2017-08-03 16:11:10.125643030 -0700
+++ new/src/share/vm/gc/parallel/psParallelCompact.cpp 2017-08-03 16:11:09.993643034 -0700
@@ -1041,9 +1041,7 @@
   DerivedPointerTable::update_pointers();
 #endif

-  ReferenceProcessorPhaseTimes pt(&_gc_timer,
-                                  ref_processor()->num_q(),
-                                  ref_processor()->processing_is_mt());
+  ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());

   ref_processor()->enqueue_discovered_references(NULL, &pt);

@@ -2109,9 +2107,7 @@
     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

     ReferenceProcessorStats stats;
-    ReferenceProcessorPhaseTimes pt(&_gc_timer,
-                                    ref_processor()->num_q(),
-                                    ref_processor()->processing_is_mt());
+    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
--- old/src/share/vm/gc/parallel/psScavenge.cpp 2017-08-03 16:11:10.893643003 -0700
+++ new/src/share/vm/gc/parallel/psScavenge.cpp 2017-08-03 16:11:10.793643007 -0700
@@ -415,9 +415,7 @@
       PSKeepAliveClosure keep_alive(promotion_manager);
       PSEvacuateFollowersClosure evac_followers(promotion_manager);
       ReferenceProcessorStats stats;
-      ReferenceProcessorPhaseTimes pt(&_gc_timer,
-                                      reference_processor()->num_q(),
-                                      reference_processor()->processing_is_mt());
+      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
       if (reference_processor()->processing_is_mt()) {
         PSRefProcTaskExecutor task_executor;
         stats = reference_processor()->process_discovered_references(
--- old/src/share/vm/gc/serial/defNewGeneration.cpp 2017-08-03 16:11:11.493642982 -0700
+++ new/src/share/vm/gc/serial/defNewGeneration.cpp 2017-08-03 16:11:11.393642986 -0700
@@ -661,7 +661,7 @@
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   ReferenceProcessor* rp = ref_processor();
   rp->setup_policy(clear_all_soft_refs);
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q(), rp->processing_is_mt());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
   const ReferenceProcessorStats& stats =
     rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                       NULL, &pt);
--- old/src/share/vm/gc/serial/genMarkSweep.cpp 2017-08-03 16:11:12.189642958 -0700
+++ new/src/share/vm/gc/serial/genMarkSweep.cpp 2017-08-03 16:11:12.089642961 -0700
@@ -210,7 +210,7 @@
     GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q(), ref_processor()->processing_is_mt());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         &is_alive, &keep_alive, &follow_stack_closure, NULL, &pt);
--- old/src/share/vm/gc/shared/genCollectedHeap.cpp 2017-08-03 16:11:12.853642935 -0700
+++ new/src/share/vm/gc/shared/genCollectedHeap.cpp 2017-08-03 16:11:12.749642938 -0700
@@ -386,7 +386,7 @@
     }
     gen->collect(full, clear_soft_refs, size, is_tlab);
     if (!rp->enqueuing_is_done()) {
-      ReferenceProcessorPhaseTimes pt(NULL, rp->num_q(), rp->processing_is_mt());
+      ReferenceProcessorPhaseTimes pt(NULL, rp->num_q());
       rp->enqueue_discovered_references(NULL, &pt);
       pt.print_enqueue_phase();
     } else {
--- old/src/share/vm/gc/shared/referenceProcessorPhaseTimes.cpp 2017-08-03 16:11:13.501642912 -0700
+++ new/src/share/vm/gc/shared/referenceProcessorPhaseTimes.cpp 2017-08-03 16:11:13.361642917 -0700
@@ -27,6 +27,7 @@
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
 #include "logging/log.hpp"
+#include "logging/logStream.hpp"

 RefProcWorkerTimeTracker::RefProcWorkerTimeTracker(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers number,
                                                    ReferenceProcessorPhaseTimes* phase_times,
@@ -203,10 +204,8 @@
   phase_times()->set_par_phase_time_ms(ReferenceProcessorPhaseTimes::RefEnqueue, elapsed);
 }

-ReferenceProcessorPhaseTimes::ReferenceProcessorPhaseTimes(GCTimer* gc_timer,
-                                                           uint max_gc_threads,
-                                                           bool processing_is_mt) :
-  _gc_timer(gc_timer), _processing_is_mt(processing_is_mt) {
+ReferenceProcessorPhaseTimes::ReferenceProcessorPhaseTimes(GCTimer* gc_timer, uint max_gc_threads) :
+  _gc_timer(gc_timer), _processing_is_mt(false) {

   for (int i = 0; i < RefParPhaseMax; i++) {
     _worker_time_sec[i] = new WorkerDataArray<double>(max_gc_threads, "Process lists (ms)");
@@ -312,7 +311,7 @@
   _ref_enqueued[ref_type_2_index(ref_type)] = count;
 }

-double ReferenceProcessorPhaseTimes::balance_queues_time_ms(ReferenceType ref_type) const{
+double ReferenceProcessorPhaseTimes::balance_queues_time_ms(ReferenceType ref_type) const {
   ASSERT_REF_TYPE(ref_type);
   return _balance_queues_time_ms[ref_type_2_index(ref_type)];
 }
@@ -323,40 +322,40 @@
 }

 ReferenceProcessorPhaseTimes::RefProcParPhases
-ReferenceProcessorPhaseTimes::par_phase(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers phase_number) const {
+ReferenceProcessorPhaseTimes::par_phase(RefProcPhaseNumbers phase_number) const {
   ASSERT_PHASE_NUMBER(phase_number);
   ASSERT_REF_TYPE(_processing_ref_type);

-  int result = ReferenceProcessorPhaseTimes::SoftRefPhase1;
+  int result = SoftRefPhase1;

   switch(_processing_ref_type) {
     case REF_SOFT:
-      result = (int)ReferenceProcessorPhaseTimes::SoftRefPhase1;
+      result = (int)SoftRefPhase1;
       result += phase_number;
-      assert((RefProcParPhases)result >= ReferenceProcessorPhaseTimes::SoftRefPhase1 &&
-             (RefProcParPhases)result <= ReferenceProcessorPhaseTimes::SoftRefPhase3,
+      assert((RefProcParPhases)result >= SoftRefPhase1 &&
+             (RefProcParPhases)result <= SoftRefPhase3,
             "Invariant (%d)", result);
       break;
     case REF_WEAK:
-      result = (int)ReferenceProcessorPhaseTimes::WeakRefPhase2;
+      result = (int)WeakRefPhase2;
       result += (phase_number - 1);
-      assert((RefProcParPhases)result >= ReferenceProcessorPhaseTimes::WeakRefPhase2 &&
-             (RefProcParPhases)result <= ReferenceProcessorPhaseTimes::WeakRefPhase3,
+      assert((RefProcParPhases)result >= WeakRefPhase2 &&
+             (RefProcParPhases)result <= WeakRefPhase3,
             "Invariant (%d)", result);
       break;
     case REF_FINAL:
-      result = (int)ReferenceProcessorPhaseTimes::FinalRefPhase2;
+      result = (int)FinalRefPhase2;
       result += (phase_number - 1);
-      assert((RefProcParPhases)result >= ReferenceProcessorPhaseTimes::FinalRefPhase2 &&
-             (RefProcParPhases)result <= ReferenceProcessorPhaseTimes::FinalRefPhase3,
+      assert((RefProcParPhases)result >= FinalRefPhase2 &&
+             (RefProcParPhases)result <= FinalRefPhase3,
             "Invariant (%d)", result);
       break;
     case REF_PHANTOM:
-      result = (int)ReferenceProcessorPhaseTimes::PhantomRefPhase2;
+      result = (int)PhantomRefPhase2;
       result += (phase_number - 1);
-      assert((RefProcParPhases)result >= ReferenceProcessorPhaseTimes::PhantomRefPhase2 &&
-             (RefProcParPhases)result <= ReferenceProcessorPhaseTimes::PhantomRefPhase3,
+      assert((RefProcParPhases)result >= PhantomRefPhase2 &&
+             (RefProcParPhases)result <= PhantomRefPhase3,
             "Invariant (%d)", result);
       break;
     default:
@@ -368,9 +367,9 @@
   return (RefProcParPhases)result;
 }

-void ReferenceProcessorPhaseTimes::print_enqueue_phase(uint base_indent, bool print_total) {
+void ReferenceProcessorPhaseTimes::print_enqueue_phase(uint base_indent, bool print_total) const {
   if (print_total) {
-    print_phase(ReferenceProcessorPhaseTimes::RefEnqueue, base_indent);
+    print_phase(RefEnqueue, base_indent);
   }

   log_debug(gc, phases, ref)("%sReference Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT
@@ -381,13 +380,14 @@

 #define TIME_FORMAT "%.1lfms"

-void ReferenceProcessorPhaseTimes::print_all_references(uint base_indent, bool print_total) {
+void ReferenceProcessorPhaseTimes::print_all_references(uint base_indent, bool print_total) const {
   if (print_total) {
-    Log(gc, phases, ref) log;
+    LogTarget(Debug, gc, phases, ref) lt;

-    if (log.is_level(LogLevel::Debug)) {
-      log.debug_stream()->print_cr("%s%s: " TIME_FORMAT,
-                                   Indents[base_indent], "Reference Processing", total_time_ms());
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      ls.print_cr("%s%s: " TIME_FORMAT,
+                  Indents[base_indent], "Reference Processing", total_time_ms());
     }
   }

@@ -398,69 +398,73 @@
   print_reference(REF_SOFT, next_indent);
   print_reference(REF_WEAK, next_indent);
   print_reference(REF_FINAL, next_indent);
   print_reference(REF_PHANTOM, next_indent);
 }

-void ReferenceProcessorPhaseTimes::print_reference(ReferenceType ref_type, uint base_indent) {
-  Log(gc, phases, ref) log;
+void ReferenceProcessorPhaseTimes::print_reference(ReferenceType ref_type, uint base_indent) const {
+  LogTarget(Debug, gc, phases, ref) lt;

-  if (log.is_level(LogLevel::Debug)) {
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
     uint next_indent = base_indent + 1;
-    outputStream* out = log.debug_stream();
     ResourceMark rm;

-    out->print_cr("%s%s: " TIME_FORMAT,
-                  Indents[base_indent], ref_type_2_string(ref_type), ref_proc_time_ms(ref_type));
+    ls.print_cr("%s%s: " TIME_FORMAT,
+                Indents[base_indent], ref_type_2_string(ref_type), ref_proc_time_ms(ref_type));

     double balance_time = balance_queues_time_ms(ref_type);
     if (balance_time != uninitialized()) {
-      out->print_cr("%s%s " TIME_FORMAT, Indents[next_indent], "Balance queues:", balance_time);
+      ls.print_cr("%s%s " TIME_FORMAT, Indents[next_indent], "Balance queues:", balance_time);
     }

     switch(ref_type) {
       case REF_SOFT:
         print_phase(SoftRefPhase1, next_indent);
-        print_phase(ReferenceProcessorPhaseTimes::SoftRefPhase2, next_indent);
-        print_phase(ReferenceProcessorPhaseTimes::SoftRefPhase3, next_indent);
+        print_phase(SoftRefPhase2, next_indent);
+        print_phase(SoftRefPhase3, next_indent);
         break;
       case REF_WEAK:
-        print_phase(ReferenceProcessorPhaseTimes::WeakRefPhase2, next_indent);
-        print_phase(ReferenceProcessorPhaseTimes::WeakRefPhase3, next_indent);
+        print_phase(WeakRefPhase2, next_indent);
+        print_phase(WeakRefPhase3, next_indent);
         break;
       case REF_FINAL:
-        print_phase(ReferenceProcessorPhaseTimes::FinalRefPhase2, next_indent);
-        print_phase(ReferenceProcessorPhaseTimes::FinalRefPhase3, next_indent);
+        print_phase(FinalRefPhase2, next_indent);
+        print_phase(FinalRefPhase3, next_indent);
         break;
       case REF_PHANTOM:
-        print_phase(ReferenceProcessorPhaseTimes::PhantomRefPhase2, next_indent);
-        print_phase(ReferenceProcessorPhaseTimes::PhantomRefPhase3, next_indent);
+        print_phase(PhantomRefPhase2, next_indent);
+        print_phase(PhantomRefPhase3, next_indent);
         break;
       default:
         ShouldNotReachHere();
     }

-    out->print_cr("%s%s " SIZE_FORMAT, Indents[next_indent], "Discovered:", ref_discovered(ref_type));
-    out->print_cr("%s%s " SIZE_FORMAT, Indents[next_indent], "Cleared:", ref_cleared(ref_type));
+    ls.print_cr("%s%s " SIZE_FORMAT, Indents[next_indent], "Discovered:", ref_discovered(ref_type));
+    ls.print_cr("%s%s " SIZE_FORMAT, Indents[next_indent], "Cleared:", ref_cleared(ref_type));
   }
 }

-void ReferenceProcessorPhaseTimes::print_phase(RefProcParPhases phase, uint indent) {
+void ReferenceProcessorPhaseTimes::print_phase(RefProcParPhases phase, uint indent) const {
   double phase_time = par_phase_time_ms(phase);
   if (phase_time != uninitialized()) {
-    Log(gc, phases, ref) log;
+    LogTarget(Debug, gc, phases, ref) lt;

-    log.debug_stream()->print_cr("%s%s%s " TIME_FORMAT,
-                                 Indents[indent],
-                                 phase_enum_2_phase_string(phase),
-                                 indent == 0 ? "" : ":", /* 0 indent logs don't need colon. */
-                                 phase_time);
-    if (processing_is_mt() && log.is_level(LogLevel::Trace)) {
-      outputStream* out = log.trace_stream();
+    LogStream ls(lt);

-      out->print("%s", Indents[indent + 1]);
+    ls.print_cr("%s%s%s " TIME_FORMAT,
+                Indents[indent],
+                phase_enum_2_phase_string(phase),
+                indent == 0 ? "" : ":", /* 0 indent logs don't need colon. */
+                phase_time);
+
+    LogTarget(Trace, gc, phases, ref) lt2;
+    if (_processing_is_mt && lt2.is_enabled()) {
+      LogStream ls(lt2);
+
+      ls.print("%s", Indents[indent + 1]);
       // worker_time_sec is recorded in seconds but it will be printed in milliseconds.
-      worker_time_sec(phase)->print_summary_on(out, true);
+      worker_time_sec(phase)->print_summary_on(&ls, true);
     }
   }
 }
--- old/src/share/vm/gc/shared/referenceProcessorPhaseTimes.hpp 2017-08-03 16:11:14.049642893 -0700
+++ new/src/share/vm/gc/shared/referenceProcessorPhaseTimes.hpp 2017-08-03 16:11:13.949642897 -0700
@@ -28,6 +28,7 @@
 #include "gc/shared/referenceProcessorStats.hpp"
 #include "gc/shared/workerDataArray.inline.hpp"
 #include "memory/referenceType.hpp"
+#include "utilities/ticks.hpp"

 class DiscoveredList;
 class GCTimer;
@@ -96,13 +97,11 @@

   double balance_queues_time_ms(ReferenceType ref_type) const;

-  bool processing_is_mt() const { return _processing_is_mt; }
-
-  void print_reference(ReferenceType ref_type, uint base_indent);
-  void print_phase(RefProcParPhases phase, uint indent);
+  void print_reference(ReferenceType ref_type, uint base_indent) const;
+  void print_phase(RefProcParPhases phase, uint indent) const;

 public:
-  ReferenceProcessorPhaseTimes(GCTimer* gc_timer, uint max_gc_threads, bool processing_is_mt);
+  ReferenceProcessorPhaseTimes(GCTimer* gc_timer, uint max_gc_threads);
   ~ReferenceProcessorPhaseTimes();

   static double uninitialized() { return -1.0; }
@@ -134,8 +133,8 @@
   // Reset all fields. If not reset at next cycle, an assertion will fail.
   void reset();

-  void print_enqueue_phase(uint base_indent = 0, bool print_total = true);
-  void print_all_references(uint base_indent = 0, bool print_total = true);
+  void print_enqueue_phase(uint base_indent = 0, bool print_total = true) const;
+  void print_all_references(uint base_indent = 0, bool print_total = true) const;
 };

 // Updates working time of each worker thread.
--- old/test/gc/g1/TestGCLogMessages.java 2017-08-03 16:11:14.577642875 -0700
+++ new/test/gc/g1/TestGCLogMessages.java 2017-08-03 16:11:14.477642878 -0700
@@ -140,6 +140,9 @@
         // TLAB handling
         new LogMessageWithLevel("Prepare TLABs", Level.DEBUG),
         new LogMessageWithLevel("Resize TLABs", Level.DEBUG),
+        // Reference Processing
+        new LogMessageWithLevel("Reference Processing", Level.DEBUG),
+        new LogMessageWithLevel("Reference Enqueuing", Level.DEBUG),

         new LogMessageWithLevelC2OrJVMCIOnly("DerivedPointerTable Update", Level.DEBUG),
         new LogMessageWithLevel("Start New Collection Set", Level.DEBUG),