# HG changeset patch
# User brutisso
# Date 1441807210 -7200
#      Wed Sep 09 16:00:10 2015 +0200
# Node ID c7f4ce4d186866e5d4286bdf4b06e79c598e459d
# Parent  9810d3869392cbd83c4c712fe78fc68ee9837362
[mq]: webrev01

diff --git a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
@@ -42,6 +42,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
@@ -1593,7 +1594,7 @@
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

-  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
+  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);

   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
@@ -2825,7 +2826,6 @@
  public:
   CMSPhaseAccounting(CMSCollector *collector,
                      const char *phase,
-                     const GCId gc_id,
                      bool print_cr = true);
   ~CMSPhaseAccounting();

@@ -2834,7 +2834,6 @@
   const char *_phase;
   elapsedTimer _wallclock;
   bool _print_cr;
-  const GCId _gc_id;

  public:
   // Not MT-safe; so do not pass around these StackObj's
@@ -2850,15 +2849,14 @@

 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                        const char *phase,
-                                       const GCId gc_id,
                                        bool print_cr) :
-  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
+  _collector(collector), _phase(phase), _print_cr(print_cr) {

   if (PrintCMSStatistics != 0) {
     _collector->resetYields();
   }

   if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp(_gc_id);
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
       _collector->cmsGen()->short_name(), _phase);
   }
@@ -2872,7 +2870,7 @@
   _collector->stopTimer();
   _wallclock.stop();
   if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp(_gc_id);
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
       _collector->cmsGen()->short_name(),
       _phase, _collector->timerValue(), _wallclock.seconds());
@@ -2951,7 +2949,7 @@
   setup_cms_unloading_and_verification_state();

   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
-    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+    PrintGCDetails && Verbose, true, _gc_timer_cm);)

   // Reset all the PLAB chunk arrays if necessary.
   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -3054,7 +3052,7 @@
     CMSTokenSyncWithLocks ts(true, bitMapLock());

     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
     bool res = markFromRootsWork();
     if (res) {
       _collectorState = Precleaning;
@@ -3751,7 +3749,7 @@
       _start_sampling = false;
     }
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
   }
   CMSTokenSync x(true); // is cms thread
@@ -3780,7 +3778,7 @@
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
     // We need more smarts in the abortable preclean
     // loop below to deal with cases where allocation
     // in young gen is very very slow, and our precleaning
@@ -3925,7 +3923,7 @@
     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
     rp->preclean_discovered_references(
           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
-          gc_timer, _gc_tracer_cm->gc_id());
+          gc_timer);
   }

   if (clean_survivor) {  // preclean the active survivor space(s)
@@ -4261,7 +4259,7 @@
     // expect it to be false and set to true
     FlagSetting fl(gch->_is_gc_active, false);
     NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
-      PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+      PrintGCDetails && Verbose, true, _gc_timer_cm);)
     gch->do_collection(true,   // full (i.e. force, see below)
                        false,  // !clear_all_soft_refs
                        0,      // size
@@ -4279,7 +4277,7 @@
 }

 void CMSCollector::checkpointRootsFinalWork() {
-  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)

   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
@@ -4329,11 +4327,10 @@
     // the most recent young generation GC, minus those cleaned up by the
     // concurrent precleaning.
     if (CMSParallelRemarkEnabled) {
-      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
       do_remark_parallel();
     } else {
-      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-        _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
       do_remark_non_parallel();
     }
   }
@@ -4341,7 +4338,7 @@
   verify_overflow_empty();

   {
-    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
     refProcessingWork();
   }
   verify_work_stacks_empty();
@@ -5116,7 +5113,7 @@
                               NULL,  // space is set further below
                               &_markBitMap, &_markStack, &mrias_cl);
   {
-    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
     // Iterate over the dirty cards, setting the corresponding bits in the
     // mod union table.
     {
@@ -5153,7 +5150,7 @@
     Universe::verify();
   }
   {
-    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);

     verify_work_stacks_empty();

@@ -5175,7 +5172,7 @@
   }

   {
-    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);

     verify_work_stacks_empty();

@@ -5194,7 +5191,7 @@
   }

   {
-    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);

     verify_work_stacks_empty();

@@ -5403,7 +5400,7 @@
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
   {
-    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);

     ReferenceProcessorStats stats;
     if (rp->processing_is_mt()) {
@@ -5428,15 +5425,13 @@
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         &task_executor,
-                                        _gc_timer_cm,
-                                        _gc_tracer_cm->gc_id());
+                                        _gc_timer_cm);
     } else {
       stats = rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         NULL,
-                                        _gc_timer_cm,
-                                        _gc_tracer_cm->gc_id());
+                                        _gc_timer_cm);
     }
     _gc_tracer_cm->report_gc_reference_stats(stats);

@@ -5447,7 +5442,7 @@

   if (should_unload_classes()) {
     {
-      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);

       // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -5460,13 +5455,13 @@
     }

     {
-      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
       // Clean up unreferenced symbols in symbol table.
       SymbolTable::unlink();
     }

     {
-      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
       // Delete entries for dead interned strings.
       StringTable::unlink(&_is_alive_closure);
     }
@@ -5534,7 +5529,7 @@
   _intra_sweep_timer.start();
   {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
@@ -5719,7 +5714,7 @@
     // Clear the mark bitmap (no grey objects to start with)
     // for the next cycle.
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);

     HeapWord* curAddr = _markBitMap.startWord();
     while (curAddr < _markBitMap.endWord()) {
@@ -5771,7 +5766,7 @@

 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
+  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   TraceCollectorStats tcs(counters());

   switch (op) {
diff --git a/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp b/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp
--- a/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp
+++ b/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp
@@ -26,6 +26,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -124,6 +125,7 @@
   while (!_should_terminate) {
     sleepBeforeNextCycle();
     if (_should_terminate) break;
+    GCIdMark gc_id_mark;
     GCCause::Cause cause = _collector->_full_gc_requested ?
       _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
     _collector->collect_in_background(cause);
diff --git a/src/share/vm/gc/cms/parNewGeneration.cpp b/src/share/vm/gc/cms/parNewGeneration.cpp
--- a/src/share/vm/gc/cms/parNewGeneration.cpp
+++ b/src/share/vm/gc/cms/parNewGeneration.cpp
@@ -896,7 +896,7 @@
     size_policy->minor_collection_begin();
   }

-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();

@@ -959,13 +959,13 @@
     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, &task_executor,
-                                              _gc_timer, _gc_tracer.gc_id());
+                                              _gc_timer);
   } else {
     thread_state_set.flush();
     gch->save_marks();
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, NULL,
-                                              _gc_timer, _gc_tracer.gc_id());
+                                              _gc_timer);
   }
   _gc_tracer.report_gc_reference_stats(stats);
   if (!promotion_failed()) {
diff --git a/src/share/vm/gc/cms/vmCMSOperations.cpp b/src/share/vm/gc/cms/vmCMSOperations.cpp
--- a/src/share/vm/gc/cms/vmCMSOperations.cpp
+++ b/src/share/vm/gc/cms/vmCMSOperations.cpp
@@ -58,7 +58,7 @@
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
+    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -70,7 +70,7 @@
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
+    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -134,6 +134,7 @@
     return;
   }
   HS_PRIVATE_CMS_INITMARK_BEGIN();
+  GCIdMark gc_id_mark(_gc_id);

   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");

@@ -161,6 +162,7 @@
     return;
   }
   HS_PRIVATE_CMS_REMARK_BEGIN();
+  GCIdMark gc_id_mark(_gc_id);

   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");

diff --git a/src/share/vm/gc/cms/vmCMSOperations.hpp b/src/share/vm/gc/cms/vmCMSOperations.hpp
--- a/src/share/vm/gc/cms/vmCMSOperations.hpp
+++ b/src/share/vm/gc/cms/vmCMSOperations.hpp
@@ -27,6 +27,7 @@

 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "runtime/vm_operations.hpp"

@@ -53,6 +54,7 @@
  protected:
   CMSCollector*  _collector;          // associated collector
   bool           _prologue_succeeded; // whether doit_prologue succeeded
+  uint           _gc_id;

   bool lost_race() const;

@@ -63,7 +65,8 @@
  public:
   VM_CMS_Operation(CMSCollector* collector):
     _collector(collector),
-    _prologue_succeeded(false) {}
+    _prologue_succeeded(false),
+    _gc_id(GCId::current()) {}
   ~VM_CMS_Operation() {}

   // The legal collector state for executing this CMS op.
diff --git a/src/share/vm/gc/cms/yieldingWorkgroup.cpp b/src/share/vm/gc/cms/yieldingWorkgroup.cpp
--- a/src/share/vm/gc/cms/yieldingWorkgroup.cpp
+++ b/src/share/vm/gc/cms/yieldingWorkgroup.cpp
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "gc/cms/yieldingWorkgroup.hpp"
+#include "gc/shared/gcId.hpp"
 #include "utilities/macros.hpp"

 YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
@@ -340,6 +341,7 @@
   // Now, release the gang mutex and do the work.
   {
     MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
+    GCIdMark gc_id_mark(data.task()->gc_id());
     data.task()->work(id);   // This might include yielding
   }
   // Reacquire monitor and note completion of this worker
diff --git a/src/share/vm/gc/g1/concurrentMark.cpp b/src/share/vm/gc/g1/concurrentMark.cpp
--- a/src/share/vm/gc/g1/concurrentMark.cpp
+++ b/src/share/vm/gc/g1/concurrentMark.cpp
@@ -41,6 +41,7 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.hpp"
@@ -520,7 +521,6 @@
   _has_overflown(false),
   _concurrent(false),
   _has_aborted(false),
-  _aborted_gc_id(GCId::undefined()),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),

@@ -991,7 +991,7 @@
     force_overflow()->update();

     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
     }
   }
@@ -1181,7 +1181,7 @@
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
     }

@@ -1195,7 +1195,7 @@
     _parallel_workers->run_task(&task);

     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                              os::elapsedTime() - scan_start);
     }
@@ -1245,8 +1245,7 @@
  public:
   G1CMTraceTime(const char* title, bool doit)
-    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
-        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
   }
 };

@@ -2391,8 +2390,7 @@
                                           &g1_keep_alive,
                                           &g1_drain_mark_stack,
                                           executor,
-                                          g1h->gc_timer_cm(),
-                                          concurrent_gc_id());
+                                          g1h->gc_timer_cm());
     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

     // The do_oop work routines of the keep_alive and drain_marking_stack
@@ -2970,7 +2968,6 @@
     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
     return;
   }
-
   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
   // concurrent bitmap clearing.
   _nextMarkBitMap->clearAll();
@@ -2988,8 +2985,6 @@
   }
   _first_overflow_barrier_sync.abort();
   _second_overflow_barrier_sync.abort();
-  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
-  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
   _has_aborted = true;

   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -3004,13 +2999,6 @@
   _g1h->register_concurrent_cycle_end();
 }

-const GCId& ConcurrentMark::concurrent_gc_id() {
-  if (has_aborted()) {
-    return _aborted_gc_id;
-  }
-  return _g1h->gc_tracer_cm()->gc_id();
-}
-
 static void print_ms_time_info(const char* prefix, const char* name,
                                NumberSeq& ns) {
   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
diff --git a/src/share/vm/gc/g1/concurrentMark.hpp b/src/share/vm/gc/g1/concurrentMark.hpp
--- a/src/share/vm/gc/g1/concurrentMark.hpp
+++ b/src/share/vm/gc/g1/concurrentMark.hpp
@@ -425,7 +425,6 @@
   volatile bool _concurrent;
   // Set at the end of a Full GC so that marking aborts
   volatile bool _has_aborted;
-  GCId          _aborted_gc_id;

   // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
@@ -768,8 +767,6 @@

   bool has_aborted() { return _has_aborted; }

-  const GCId& concurrent_gc_id();
-
   // This prints the global/local fingers. It is used for debugging.
   NOT_PRODUCT(void print_finger();)

diff --git a/src/share/vm/gc/g1/concurrentMarkThread.cpp b/src/share/vm/gc/g1/concurrentMarkThread.cpp
--- a/src/share/vm/gc/g1/concurrentMarkThread.cpp
+++ b/src/share/vm/gc/g1/concurrentMarkThread.cpp
@@ -30,6 +30,7 @@
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
@@ -85,7 +86,7 @@
   SuspendibleThreadSetJoiner sts_joiner(join_sts);
   va_list args;
   va_start(args, fmt);
-  gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
+  gclog_or_tty->gclog_stamp();
   gclog_or_tty->vprint_cr(fmt, args);
   va_end(args);
 }
@@ -108,6 +109,7 @@
       break;
     }

+    GCIdMark gc_id_mark;
     {
       ResourceMark rm;
       HandleMark   hm;
diff --git a/src/share/vm/gc/g1/g1CollectedHeap.cpp b/src/share/vm/gc/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp
@@ -53,6 +53,7 @@
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -1457,6 +1458,7 @@
   gc_timer->register_gc_start();

   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
+  GCIdMark gc_id_mark;
   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());

   SvcGCMarker sgcm(SvcGCMarker::FULL);
@@ -1483,7 +1485,7 @@
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);

   {
-    GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
+    GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
     TraceCollectorStats tcs(g1mm()->full_collection_counters());
     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

@@ -3932,7 +3934,7 @@
     return;
   }

-  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
+  gclog_or_tty->gclog_stamp();

   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
@@ -3990,6 +3992,7 @@

   _gc_timer_stw->register_gc_start();

+  GCIdMark gc_id_mark;
   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());

   SvcGCMarker sgcm(SvcGCMarker::MINOR);
@@ -5565,8 +5568,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               NULL,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   } else {
     // Parallel reference processing
     assert(rp->num_q() == no_of_gc_workers, "sanity");
@@ -5577,8 +5579,7 @@
                                               &keep_alive,
                                               &drain_queue,
                                               &par_task_executor,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   }

   _gc_tracer_stw->report_gc_reference_stats(stats);
diff --git a/src/share/vm/gc/g1/g1CollectorPolicy.cpp b/src/share/vm/gc/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.cpp
@@ -857,7 +857,7 @@
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;

-  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, _g1->gc_tracer_cm()->gc_id());
+  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
 }

 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
@@ -952,8 +952,7 @@
     collector_state()->set_initiate_conc_mark_if_possible(true);
   }

-  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
-                          end_time_sec, _g1->gc_tracer_stw()->gc_id());
+  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);

   if (update_stats) {
     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
@@ -1586,7 +1585,7 @@
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
-  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, _g1->gc_tracer_cm()->gc_id());
+  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
 }

 // Add the heap region at the head of the non-incremental collection set
diff --git a/src/share/vm/gc/g1/g1MMUTracker.cpp b/src/share/vm/gc/g1/g1MMUTracker.cpp
--- a/src/share/vm/gc/g1/g1MMUTracker.cpp
+++ b/src/share/vm/gc/g1/g1MMUTracker.cpp
@@ -76,7 +76,7 @@
   return gc_time;
 }

-void G1MMUTrackerQueue::add_pause(double start, double end, const GCId& gcId) {
+void G1MMUTrackerQueue::add_pause(double start, double end) {
   double duration = end - start;

   remove_expired_entries(end);
@@ -106,7 +106,7 @@
   // Current entry needs to be added before calculating the value
   double slice_time = calculate_gc_time(end);
-  G1MMUTracer::report_mmu(gcId, _time_slice, slice_time, _max_gc_time);
+  G1MMUTracer::report_mmu(_time_slice, slice_time, _max_gc_time);
 }

 // basically the _internal call does not remove expired entries
diff --git a/src/share/vm/gc/g1/g1MMUTracker.hpp b/src/share/vm/gc/g1/g1MMUTracker.hpp
--- a/src/share/vm/gc/g1/g1MMUTracker.hpp
+++ b/src/share/vm/gc/g1/g1MMUTracker.hpp
@@ -43,7 +43,7 @@
  public:
   G1MMUTracker(double time_slice, double max_gc_time);

-  virtual void add_pause(double start, double end, const GCId& gcId) = 0;
+  virtual void add_pause(double start, double end) = 0;
   virtual double when_sec(double current_time, double pause_time) = 0;

   double max_gc_time() {
@@ -127,7 +127,7 @@
  public:
   G1MMUTrackerQueue(double time_slice, double max_gc_time);

-  virtual void add_pause(double start, double end, const GCId& gcId);
+  virtual void add_pause(double start, double end);

   virtual double when_sec(double current_time, double pause_time);
 };
diff --git a/src/share/vm/gc/g1/g1MarkSweep.cpp b/src/share/vm/gc/g1/g1MarkSweep.cpp
--- a/src/share/vm/gc/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc/g1/g1MarkSweep.cpp
@@ -121,7 +121,7 @@
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());

   G1CollectedHeap* g1h = G1CollectedHeap::heap();

@@ -146,8 +146,7 @@
                                       &GenMarkSweep::keep_alive,
                                       &GenMarkSweep::follow_stack_closure,
                                       NULL,
-                                      gc_timer(),
-                                      gc_tracer()->gc_id());
+                                      gc_timer());
   gc_tracer()->report_gc_reference_stats(stats);

@@ -200,7 +199,7 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
-  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());

   prepare_compaction();
 }
@@ -233,7 +232,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());

   // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -294,7 +293,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

-  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());

   G1SpaceCompactClosure blk;
   g1h->heap_region_iterate(&blk);
diff --git a/src/share/vm/gc/g1/vm_operations_g1.cpp b/src/share/vm/gc/g1/vm_operations_g1.cpp
--- a/src/share/vm/gc/g1/vm_operations_g1.cpp
+++ b/src/share/vm/gc/g1/vm_operations_g1.cpp
@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/g1/g1Log.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcTimer.hpp"
@@ -227,7 +228,8 @@
 void VM_CGC_Operation::doit() {
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
+  GCIdMark gc_id_mark(_gc_id);
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm());
   IsGCActiveMark x;
   _cl->do_void();
 }
diff --git a/src/share/vm/gc/g1/vm_operations_g1.hpp b/src/share/vm/gc/g1/vm_operations_g1.hpp
--- a/src/share/vm/gc/g1/vm_operations_g1.hpp
+++ b/src/share/vm/gc/g1/vm_operations_g1.hpp
@@ -26,6 +27,7 @@
 #define SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP

 #include "gc/g1/g1AllocationContext.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/vmGCOperations.hpp"

 // VM_operations for the G1 collector.
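The VM_CGC_Operation hunks above and below follow the same pattern as the VM_CMS_Operation change earlier in the patch: GCId::current() is sampled on the concurrent-mark thread when the operation is created, carried in a plain uint field, and re-established on the VM thread with a GCIdMark before the traced pause work runs. A minimal compilable sketch of that hand-off; t_gc_id, GCIdMarkSketch, and VMOpSketch are illustrative stand-ins, not HotSpot code:

    // Sketch only: models the capture-and-re-mark pattern, assuming a
    // thread-local slot in place of NamedThread::_gc_id.
    #include <cstdint>
    #include <cstdio>

    static thread_local uint32_t t_gc_id = UINT32_MAX;     // UINT32_MAX == undefined

    struct GCIdMarkSketch {                                // RAII, like GCIdMark
      explicit GCIdMarkSketch(uint32_t id) { t_gc_id = id; }
      ~GCIdMarkSketch() { t_gc_id = UINT32_MAX; }          // reset to undefined
    };

    struct VMOpSketch {
      uint32_t _gc_id;
      VMOpSketch() : _gc_id(t_gc_id) {}  // constructed on the requesting thread
      void doit() {                      // later, conceptually on the VM thread
        GCIdMarkSketch mark(_gc_id);     // re-establish the captured id
        std::printf("#%u: concurrent-phase pause\n", t_gc_id);
      }
    };

    int main() {
      GCIdMarkSketch cycle(7);           // the marking thread starts cycle 7
      VMOpSketch op;                     // the op captures id 7 here...
      op.doit();                         // ...and its logging is tagged with it
    }

Without the explicit capture, the VM thread would observe an undefined id, since it never runs inside the marking thread's GCIdMark scope.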
@@ -104,6 +105,7 @@
   VoidClosure* _cl;
   const char* _printGCMessage;
   bool _needs_pll;
+  uint _gc_id;

 protected:
   // java.lang.ref.Reference support
@@ -112,7 +114,7 @@

 public:
   VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
-    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { }
+    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll), _gc_id(GCId::current()) { }
   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
diff --git a/src/share/vm/gc/parallel/pcTasks.cpp b/src/share/vm/gc/parallel/pcTasks.cpp
--- a/src/share/vm/gc/parallel/pcTasks.cpp
+++ b/src/share/vm/gc/parallel/pcTasks.cpp
@@ -53,7 +53,7 @@
   ResourceMark rm;

   NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);

@@ -82,7 +82,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
@@ -153,7 +153,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("RefProcTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
@@ -209,7 +209,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));

   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -241,7 +241,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));

   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -308,7 +308,7 @@

 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

   NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));

   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -323,7 +323,7 @@
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));

   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
diff --git a/src/share/vm/gc/parallel/psMarkSweep.cpp b/src/share/vm/gc/parallel/psMarkSweep.cpp
--- a/src/share/vm/gc/parallel/psMarkSweep.cpp
+++ b/src/share/vm/gc/parallel/psMarkSweep.cpp
@@ -36,6 +36,7 @@
 #include "gc/serial/markSweep.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -113,6 +114,7 @@
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();

+  GCIdMark gc_id_mark;
   _gc_timer->register_gc_start();
   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

@@ -165,7 +167,7 @@
     HandleMark hm;

     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);

@@ -508,7 +510,7 @@

 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);

   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

@@ -541,7 +543,7 @@
     ref_processor()->setup_policy(clear_all_softrefs);
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
-        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
+        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
     gc_tracer()->report_gc_reference_stats(stats);
   }

@@ -567,7 +569,7 @@

 void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);

   // Now all live objects are marked, compute the new object addresses.

@@ -594,7 +596,7 @@

 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);

   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
@@ -634,7 +636,7 @@

 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);

   // All pointers are now adjusted, move objects accordingly

diff --git a/src/share/vm/gc/parallel/psParallelCompact.cpp b/src/share/vm/gc/parallel/psParallelCompact.cpp
--- a/src/share/vm/gc/parallel/psParallelCompact.cpp
+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp
@@ -40,6 +40,7 @@
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -960,7 +961,7 @@
   // at each young gen gc. Do the update unconditionally (even though a
   // promotion failure does not swap spaces) because an unknown number of young
   // collections will have swapped the spaces an unknown number of times.
- GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); + GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); _space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space()); @@ -1003,7 +1004,7 @@ void PSParallelCompact::post_compact() { - GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); + GCTraceTime tm("post compact", print_phases(), true, &_gc_timer); for (unsigned int id = old_space_id; id < last_space_id; ++id) { // Clear the marking bitmap, summary data and split info. @@ -1824,7 +1825,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm, bool maximum_compaction) { - GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); + GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer); // trace("2"); #ifdef ASSERT @@ -1984,6 +1985,7 @@ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); + GCIdMark gc_id_mark; _gc_timer.register_gc_start(); _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); @@ -2031,7 +2033,7 @@ gc_task_manager()->task_idle_workers(); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -2331,7 +2333,7 @@ bool maximum_heap_compaction, ParallelOldTracer *gc_tracer) { // Recursively traverse all live objects and mark them - GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); + GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); @@ -2346,7 +2348,7 @@ ClassLoaderDataGraph::clear_claimed_marks(); { - GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); + GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer); ParallelScavengeHeap::ParStrongRootsScope psrs; @@ -2375,24 +2377,24 @@ // Process reference objects found during marking { - GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); + GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer); ReferenceProcessorStats stats; if (ref_processor()->processing_is_mt()) { RefProcTaskExecutor task_executor; stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, - &task_executor, &_gc_timer, _gc_tracer.gc_id()); + &task_executor, &_gc_timer); } else { stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, - &_gc_timer, _gc_tracer.gc_id()); + &_gc_timer); } gc_tracer->report_gc_reference_stats(stats); } - GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); + GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer); // This is the point where the entire marking should have completed. 
   assert(cm->marking_stacks_empty(), "Marking should have completed");

@@ -2423,7 +2425,7 @@

 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);

   // Need new claim bits when tracing through and adjusting pointers.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2459,7 +2461,7 @@
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
-  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);

   // Find the threads that are active
   unsigned int which = 0;
@@ -2533,7 +2535,7 @@

 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                    uint parallel_gc_threads) {
-  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);

   ParallelCompactData& sd = PSParallelCompact::summary_data();

@@ -2615,7 +2617,7 @@
                                      GCTaskQueue* q,
                                      ParallelTaskTerminator* terminator_ptr,
                                      uint parallel_gc_threads) {
-  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);

   // Once a thread has drained it's stack, it should try to steal regions from
   // other threads.
@@ -2663,7 +2665,7 @@

 void PSParallelCompact::compact() {
   // trace("5");
-  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);

   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
@@ -2679,7 +2681,7 @@
   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);

   {
-    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);

     gc_task_manager()->execute_and_wait(q);

@@ -2693,7 +2695,7 @@

   {
     // Update the deferred objects, if any.  Any compaction manager can be used.
-    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       update_deferred_objects(cm, SpaceId(id));
diff --git a/src/share/vm/gc/parallel/psScavenge.cpp b/src/share/vm/gc/parallel/psScavenge.cpp
--- a/src/share/vm/gc/parallel/psScavenge.cpp
+++ b/src/share/vm/gc/parallel/psScavenge.cpp
@@ -36,6 +36,7 @@
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -278,6 +279,7 @@
     return false;
   }

+  GCIdMark gc_id_mark;
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

   bool promotion_failure_occurred = false;
@@ -322,7 +324,7 @@
     HandleMark hm;

     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
+    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);

@@ -387,7 +389,7 @@
     // We'll use the promotion manager again later.
     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
     {
-      GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
+      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
       ParallelScavengeHeap::ParStrongRootsScope psrs;

       GCTaskQueue* q = GCTaskQueue::create();
@@ -429,7 +431,7 @@

     // Process reference objects discovered during scavenge
     {
-      GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());
+      GCTraceTime tm("References", false, false, &_gc_timer);

       reference_processor()->setup_policy(false); // not always_clear
       reference_processor()->set_active_mt_degree(active_workers);
@@ -440,10 +442,10 @@
         PSRefProcTaskExecutor task_executor;
         stats = reference_processor()->process_discovered_references(
           &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
-          &_gc_timer, _gc_tracer.gc_id());
+          &_gc_timer);
       } else {
         stats = reference_processor()->process_discovered_references(
-          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
+          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
       }

       _gc_tracer.report_gc_reference_stats(stats);
@@ -458,7 +460,7 @@
     }

     {
-      GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
+      GCTraceTime tm("StringTable", false, false, &_gc_timer);
       // Unlink any dead interned Strings and process the remaining live ones.
       PSScavengeRootsClosure root_closure(promotion_manager);
       StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
     }
@@ -628,7 +630,7 @@
   NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

   {
-    GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());
+    GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
     CodeCache::prune_scavenge_root_nmethods();
   }

diff --git a/src/share/vm/gc/serial/defNewGeneration.cpp b/src/share/vm/gc/serial/defNewGeneration.cpp
--- a/src/share/vm/gc/serial/defNewGeneration.cpp
+++ b/src/share/vm/gc/serial/defNewGeneration.cpp
@@ -583,7 +583,7 @@

   init_assuming_no_promotion_failure();

-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();

@@ -646,7 +646,7 @@
   rp->setup_policy(clear_all_soft_refs);
   const ReferenceProcessorStats& stats =
   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
-                                    NULL, _gc_timer, gc_tracer.gc_id());
+                                    NULL, _gc_timer);
   gc_tracer.report_gc_reference_stats(stats);
   if (!_promotion_failed) {
diff --git a/src/share/vm/gc/serial/genMarkSweep.cpp b/src/share/vm/gc/serial/genMarkSweep.cpp
--- a/src/share/vm/gc/serial/genMarkSweep.cpp
+++ b/src/share/vm/gc/serial/genMarkSweep.cpp
@@ -70,7 +70,7 @@
   set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);

-  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
+  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);

   gch->trace_heap_before_gc(_gc_tracer);

@@ -186,7 +186,7 @@
 void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);

   GenCollectedHeap* gch = GenCollectedHeap::heap();

@@ -217,7 +217,7 @@
   ref_processor()->setup_policy(clear_all_softrefs);
   const ReferenceProcessorStats& stats =
     ref_processor()->process_discovered_references(
-      &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
+      &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
   gc_tracer()->report_gc_reference_stats(stats);
 }

@@ -259,7 +259,7 @@

   GenCollectedHeap* gch = GenCollectedHeap::heap();

-  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);

   gch->prepare_for_compaction();
 }
@@ -275,7 +275,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();

   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);

   // Need new claim bits for the pointer adjustment tracing.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -327,7 +327,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   GenCollectedHeap* gch = GenCollectedHeap::heap();

-  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);

   GenCompactClosure blk;
   gch->generation_iterate(&blk, true);
diff --git a/src/share/vm/gc/shared/collectedHeap.cpp b/src/share/vm/gc/shared/collectedHeap.cpp
--- a/src/share/vm/gc/shared/collectedHeap.cpp
+++ b/src/share/vm/gc/shared/collectedHeap.cpp
@@ -573,13 +573,13 @@

 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
   if (HeapDumpBeforeFullGC) {
-    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
+    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
     // We are doing a full collection and a heap dump before
     // full collection has been requested.
    HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
-    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create());
+    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
     inspector.doit();
   }
@@ -587,11 +587,11 @@

 void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
   if (HeapDumpAfterFullGC) {
-    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create());
+    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramAfterFullGC) {
-    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create());
+    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
     inspector.doit();
   }
diff --git a/src/share/vm/gc/shared/gcId.cpp b/src/share/vm/gc/shared/gcId.cpp
--- a/src/share/vm/gc/shared/gcId.cpp
+++ b/src/share/vm/gc/shared/gcId.cpp
@@ -25,18 +25,37 @@
 #include "precompiled.hpp"
 #include "gc/shared/gcId.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"

 uint GCId::_next_id = 0;

-const GCId GCId::create() {
-  return GCId(_next_id++);
+NamedThread* currentNamedthread() {
+  assert(Thread::current()->is_Named_thread(), "This thread must be NamedThread");
+  return (NamedThread*)Thread::current();
 }
-const GCId GCId::peek() {
-  return GCId(_next_id);
+
+const uint GCId::create() {
+  return _next_id++;
 }
-const GCId GCId::undefined() {
-  return GCId(UNDEFINED);
+
+const uint GCId::current() {
+  assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id.");
+  return current_raw();
 }
-bool GCId::is_undefined() const {
-  return _id == UNDEFINED;
+
+const uint GCId::current_raw() {
+  return currentNamedthread()->gc_id();
 }
+
+GCIdMark::GCIdMark() : _gc_id(GCId::create()) {
+  currentNamedthread()->set_gc_id(_gc_id);
+}
+
+GCIdMark::GCIdMark(uint gc_id) : _gc_id(gc_id) {
+  currentNamedthread()->set_gc_id(_gc_id);
+}
+
+GCIdMark::~GCIdMark() {
+  currentNamedthread()->set_gc_id(GCId::undefined());
+}
+
diff --git a/src/share/vm/gc/shared/gcId.hpp b/src/share/vm/gc/shared/gcId.hpp
--- a/src/share/vm/gc/shared/gcId.hpp
+++ b/src/share/vm/gc/shared/gcId.hpp
@@ -27,25 +27,26 @@

 #include "memory/allocation.hpp"

-class GCId VALUE_OBJ_CLASS_SPEC {
- private:
-  uint _id;
-  GCId(uint id) : _id(id) {}
-  GCId() { } // Unused
-
+class GCId : public AllStatic {
+  friend class GCIdMark;
   static uint _next_id;
   static const uint UNDEFINED = (uint)-1;
+  static const uint create();

  public:
-  uint id() const {
-    assert(_id != UNDEFINED, "Using undefined GC ID");
-    return _id;
-  }
-  bool is_undefined() const;
+  // Returns the currently active GC id. Asserts that there is an active GC id.
+  static const uint current();
+  // Same as current() but can return undefined() if no GC id is currently active
+  static const uint current_raw();
+  static const uint undefined() { return UNDEFINED; }
+};

-  static const GCId create();
-  static const GCId peek();
-  static const GCId undefined();
+class GCIdMark {
+  uint _gc_id;
+ public:
+  GCIdMark();
+  GCIdMark(uint gc_id);
+  ~GCIdMark();
 };

 #endif // SHARE_VM_GC_SHARED_GCID_HPP
diff --git a/src/share/vm/gc/shared/gcTrace.cpp b/src/share/vm/gc/shared/gcTrace.cpp
--- a/src/share/vm/gc/shared/gcTrace.cpp
+++ b/src/share/vm/gc/shared/gcTrace.cpp
@@ -40,31 +40,16 @@
 #include "gc/g1/evacuationInfo.hpp"
 #endif

-#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?")
-#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?")
-
 void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
-  assert_unset_gc_id();
-
-  GCId gc_id = GCId::create();
-  _shared_gc_info.set_gc_id(gc_id);
   _shared_gc_info.set_cause(cause);
   _shared_gc_info.set_start_timestamp(timestamp);
 }

 void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
-  assert_unset_gc_id();
-
   report_gc_start_impl(cause, timestamp);
 }

-bool GCTracer::has_reported_gc_start() const {
-  return !_shared_gc_info.gc_id().is_undefined();
-}
-
 void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
   _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
   _shared_gc_info.set_end_timestamp(timestamp);
@@ -74,16 +59,10 @@
 }

 void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   report_gc_end_impl(timestamp, time_partitions);
-
-  _shared_gc_info.set_gc_id(GCId::undefined());
 }

 void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
-  assert_set_gc_id();
-
   send_reference_stats_event(REF_SOFT, rps.soft_count());
   send_reference_stats_event(REF_WEAK, rps.weak_count());
   send_reference_stats_event(REF_FINAL, rps.final_count());
@@ -94,14 +73,12 @@

 #if INCLUDE_SERVICES
 class ObjectCountEventSenderClosure : public KlassInfoClosure {
-  const GCId _gc_id;
   const double _size_threshold_percentage;
   const size_t _total_size_in_words;
   const Ticks _timestamp;

  public:
-  ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, const Ticks& timestamp) :
-    _gc_id(gc_id),
+  ObjectCountEventSenderClosure(size_t total_size_in_words, const Ticks& timestamp) :
     _size_threshold_percentage(ObjectCountCutOffPercent / 100),
     _total_size_in_words(total_size_in_words),
     _timestamp(timestamp)
@@ -109,7 +86,7 @@

   virtual void do_cinfo(KlassInfoEntry* entry) {
     if (should_send_event(entry)) {
-      ObjectCountEventSender::send(entry, _gc_id, _timestamp);
+      ObjectCountEventSender::send(entry, _timestamp);
     }
   }

@@ -121,7 +98,6 @@
 };

 void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
-  assert_set_gc_id();
   assert(is_alive_cl != NULL, "Must supply function to check liveness");

   if (ObjectCountEventSender::should_send_event()) {
@@ -131,7 +107,7 @@
     if (!cit.allocation_failed()) {
       HeapInspection hi(false, false, false, NULL);
       hi.populate_table(&cit, is_alive_cl);
-      ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now());
+      ObjectCountEventSenderClosure event_sender(cit.size_of_instances_in_words(), Ticks::now());
       cit.iterate(&event_sender);
     }
   }
@@ -139,14 +115,10 @@
 #endif // INCLUDE_SERVICES

 void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
-  assert_set_gc_id();
-
   send_gc_heap_summary_event(when, heap_summary);
 }

 void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
-  assert_set_gc_id();
-
   send_meta_space_summary_event(when, summary);

   send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
@@ -156,7 +128,6 @@
 }

 void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
   assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");

   GCTracer::report_gc_end_impl(timestamp, time_partitions);
@@ -166,8 +137,6 @@
 }

 void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {
-  assert_set_gc_id();
-
   send_promotion_failed_event(pf_info);
 }

@@ -191,78 +160,56 @@
 void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                        uint age, bool tenured,
                                                        size_t plab_size) const {
-  assert_set_gc_id();
   send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
 }

 void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                         uint age, bool tenured) const {
-  assert_set_gc_id();
   send_promotion_outside_plab_event(klass, obj_size, age, tenured);
 }

 void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   GCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_old_gc_event();
 }

 void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_parallel_old_event();
 }

 void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
-  assert_set_gc_id();
-
   _parallel_old_gc_info.report_dense_prefix(dense_prefix);
 }

 void OldGCTracer::report_concurrent_mode_failure() {
-  assert_set_gc_id();
-
   send_concurrent_mode_failure_event();
 }

 #if INCLUDE_ALL_GCS
-void G1MMUTracer::report_mmu(const GCId& gcId, double timeSlice, double gcTime, double maxTime) {
-  assert(!gcId.is_undefined(), "Undefined GC id");
-
-  send_g1_mmu_event(gcId, timeSlice, gcTime, maxTime);
+void G1MMUTracer::report_mmu(double timeSlice, double gcTime, double maxTime) {
+  send_g1_mmu_event(timeSlice, gcTime, maxTime);
 }

 void G1NewTracer::report_yc_type(G1YCType type) {
-  assert_set_gc_id();
-
   _g1_young_gc_info.set_type(type);
 }

 void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
-  assert_set_gc_id();
-
   YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_g1_young_gc_event();
 }

 void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
-  assert_set_gc_id();
-
   send_evacuation_info_event(info);
 }

 void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
-  assert_set_gc_id();
-
   send_evacuation_failed_event(ef_info);
   ef_info.reset();
 }

 void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const {
-  assert_set_gc_id();
-
   send_young_evacuation_statistics(young_summary);
send_old_evacuation_statistics(old_summary); } diff --git a/src/share/vm/gc/shared/gcTrace.hpp b/src/share/vm/gc/shared/gcTrace.hpp --- a/src/share/vm/gc/shared/gcTrace.hpp +++ b/src/share/vm/gc/shared/gcTrace.hpp @@ -52,7 +52,6 @@ class SharedGCInfo VALUE_OBJ_CLASS_SPEC { private: - GCId _gc_id; GCName _name; GCCause::Cause _cause; Ticks _start_timestamp; @@ -62,7 +61,6 @@ public: SharedGCInfo(GCName name) : - _gc_id(GCId::undefined()), _name(name), _cause(GCCause::_last_gc_cause), _start_timestamp(), @@ -71,9 +69,6 @@ _longest_pause() { } - void set_gc_id(GCId gc_id) { _gc_id = gc_id; } - const GCId& gc_id() const { return _gc_id; } - void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; } const Ticks start_timestamp() const { return _start_timestamp; } @@ -128,8 +123,6 @@ void report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& metaspace_summary) const; void report_gc_reference_stats(const ReferenceProcessorStats& rp) const; void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN; - bool has_reported_gc_start() const; - const GCId& gc_id() { return _shared_gc_info.gc_id(); } protected: GCTracer(GCName name) : _shared_gc_info(name) {} @@ -242,10 +235,10 @@ #if INCLUDE_ALL_GCS class G1MMUTracer : public AllStatic { - static void send_g1_mmu_event(const GCId& gcId, double timeSlice, double gcTime, double maxTime); + static void send_g1_mmu_event(double timeSlice, double gcTime, double maxTime); public: - static void report_mmu(const GCId& gcId, double timeSlice, double gcTime, double maxTime); + static void report_mmu(double timeSlice, double gcTime, double maxTime); }; class G1NewTracer : public YoungGCTracer { diff --git a/src/share/vm/gc/shared/gcTraceSend.cpp b/src/share/vm/gc/shared/gcTraceSend.cpp --- a/src/share/vm/gc/shared/gcTraceSend.cpp +++ b/src/share/vm/gc/shared/gcTraceSend.cpp @@ -44,7 +44,7 @@ void GCTracer::send_garbage_collection_event() const { EventGCGarbageCollection event(UNTIMED); if (event.should_commit()) { - event.set_gcId(_shared_gc_info.gc_id().id()); + event.set_gcId(GCId::current()); event.set_name(_shared_gc_info.name()); event.set_cause((u2) _shared_gc_info.cause()); event.set_sumOfPauses(_shared_gc_info.sum_of_pauses()); @@ -58,7 +58,7 @@ void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { EventGCReferenceStatistics e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_type((u1)type); e.set_count(count); e.commit(); @@ -69,7 +69,7 @@ const MetaspaceChunkFreeListSummary& summary) const { EventMetaspaceChunkFreeListSummary e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_when(when); e.set_metadataType(mdtype); @@ -92,7 +92,7 @@ void ParallelOldTracer::send_parallel_old_event() const { EventGCParallelOld e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -103,7 +103,7 @@ void YoungGCTracer::send_young_gc_event() const { EventGCYoungGarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_tenuringThreshold(_tenuring_threshold); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -125,7 
+125,7 @@ EventPromoteObjectInNewPLAB event; if (event.should_commit()) { - event.set_gcId(_shared_gc_info.gc_id().id()); + event.set_gcId(GCId::current()); event.set_class(klass); event.set_objectSize(obj_size); event.set_tenured(tenured); @@ -140,7 +140,7 @@ EventPromoteObjectOutsidePLAB event; if (event.should_commit()) { - event.set_gcId(_shared_gc_info.gc_id().id()); + event.set_gcId(GCId::current()); event.set_class(klass); event.set_objectSize(obj_size); event.set_tenured(tenured); @@ -152,7 +152,7 @@ void OldGCTracer::send_old_gc_event() const { EventGCOldGarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); e.commit(); @@ -171,7 +171,7 @@ void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { EventPromotionFailed e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_data(to_trace_struct(pf_info)); e.set_thread(pf_info.thread()->thread_id()); e.commit(); @@ -182,7 +182,7 @@ void OldGCTracer::send_concurrent_mode_failure_event() { EventConcurrentModeFailure e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.commit(); } } @@ -191,7 +191,7 @@ void G1NewTracer::send_g1_young_gc_event() { EventGCG1GarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_type(_g1_young_gc_info.type()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -199,10 +199,10 @@ } } -void G1MMUTracer::send_g1_mmu_event(const GCId& gcId, double timeSlice, double gcTime, double maxTime) { +void G1MMUTracer::send_g1_mmu_event(double timeSlice, double gcTime, double maxTime) { EventGCG1MMU e; if (e.should_commit()) { - e.set_gcId(gcId.id()); + e.set_gcId(GCId::current()); e.set_timeSlice(timeSlice); e.set_gcTime(gcTime); e.set_maxGcTime(maxTime); @@ -213,7 +213,7 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { EventEvacuationInfo e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_cSetRegions(info->collectionset_regions()); e.set_cSetUsedBefore(info->collectionset_used_before()); e.set_cSetUsedAfter(info->collectionset_used_after()); @@ -229,7 +229,7 @@ void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { EventEvacuationFailed e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_data(to_trace_struct(ef_info)); e.commit(); } @@ -253,7 +253,7 @@ void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const { EventGCG1EvacuationYoungStatistics surv_evt; if (surv_evt.should_commit()) { - surv_evt.set_stats(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary)); + surv_evt.set_stats(create_g1_evacstats(GCId::current(), summary)); surv_evt.commit(); } } @@ -261,7 +261,7 @@ void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const { EventGCG1EvacuationOldStatistics old_evt; if (old_evt.should_commit()) { - old_evt.set_stats(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary)); + old_evt.set_stats(create_g1_evacstats(GCId::current(), summary)); old_evt.commit(); } } @@ -287,17 +287,16 @@ } class GCHeapSummaryEventSender : public 
GCHeapSummaryVisitor { - GCId _gc_id; GCWhen::Type _when; public: - GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {} + GCHeapSummaryEventSender(GCWhen::Type when) : _when(when) {} void visit(const GCHeapSummary* heap_summary) const { const VirtualSpaceSummary& heap_space = heap_summary->heap(); EventGCHeapSummary e; if (e.should_commit()) { - e.set_gcId(_gc_id.id()); + e.set_gcId(GCId::current()); e.set_when((u1)_when); e.set_heapSpace(to_trace_struct(heap_space)); e.set_heapUsed(heap_summary->used()); @@ -310,7 +309,7 @@ EventG1HeapSummary e; if (e.should_commit()) { - e.set_gcId(_gc_id.id()); + e.set_gcId(GCId::current()); e.set_when((u1)_when); e.set_edenUsedSize(g1_heap_summary->edenUsed()); e.set_edenTotalSize(g1_heap_summary->edenCapacity()); @@ -331,7 +330,7 @@ EventPSHeapSummary e; if (e.should_commit()) { - e.set_gcId(_gc_id.id()); + e.set_gcId(GCId::current()); e.set_when((u1)_when); e.set_oldSpace(to_trace_struct(ps_heap_summary->old())); @@ -346,7 +345,7 @@ }; void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const { - GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when); + GCHeapSummaryEventSender visitor(when); heap_summary.accept(&visitor); } @@ -363,7 +362,7 @@ void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { EventMetaspaceSummary e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.gc_id().id()); + e.set_gcId(GCId::current()); e.set_when((u1) when); e.set_gcThreshold(meta_space_summary.capacity_until_GC()); e.set_metaspace(to_trace_struct(meta_space_summary.meta_space())); @@ -374,15 +373,12 @@ } class PhaseSender : public PhaseVisitor { - GCId _gc_id; public: - PhaseSender(GCId gc_id) : _gc_id(gc_id) {} - template <class T> void send_phase(PausePhase* pause) { T event(UNTIMED); if (event.should_commit()) { - event.set_gcId(_gc_id.id()); + event.set_gcId(GCId::current()); event.set_name(pause->name()); event.set_starttime(pause->start()); event.set_endtime(pause->end()); @@ -406,7 +402,7 @@ }; void GCTracer::send_phase_events(TimePartitions* time_partitions) const { - PhaseSender phase_reporter(_shared_gc_info.gc_id()); + PhaseSender phase_reporter; TimePartitionPhasesIterator iter(time_partitions); while (iter.has_next()) { diff --git a/src/share/vm/gc/shared/gcTraceTime.cpp b/src/share/vm/gc/shared/gcTraceTime.cpp --- a/src/share/vm/gc/shared/gcTraceTime.cpp +++ b/src/share/vm/gc/shared/gcTraceTime.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc/shared/gcId.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.hpp" @@ -35,7 +36,7 @@ #include "utilities/ticks.inline.hpp" -GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) : +GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) : _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() { if (_doit || _timer != NULL) { _start_counter.stamp(); } @@ -49,11 +50,7 @@ } if (_doit) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); - if (PrintGCID) { - gclog_or_tty->print("#%u: ", gc_id.id()); - } + gclog_or_tty->gclog_stamp(); gclog_or_tty->print("[%s", title); gclog_or_tty->flush(); } diff --git a/src/share/vm/gc/shared/gcTraceTime.hpp b/src/share/vm/gc/shared/gcTraceTime.hpp --- a/src/share/vm/gc/shared/gcTraceTime.hpp +++ b/src/share/vm/gc/shared/gcTraceTime.hpp @@
-39,7 +39,7 @@ Ticks _start_counter; public: - GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id); + GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer); ~GCTraceTime(); }; diff --git a/src/share/vm/gc/shared/genCollectedHeap.cpp b/src/share/vm/gc/shared/genCollectedHeap.cpp --- a/src/share/vm/gc/shared/genCollectedHeap.cpp +++ b/src/share/vm/gc/shared/genCollectedHeap.cpp @@ -30,6 +30,7 @@ #include "code/icBuffer.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/collectorCounters.hpp" +#include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.hpp" @@ -315,9 +316,7 @@ bool restore_marks_for_biased_locking) { // Timer for individual generations. Last argument is false: no CR // FIXME: We should try to start the timing earlier to cover more of the GC pause - // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later - // so we can assume here that the next GC id is what we want. - GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek()); + GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL); TraceCollectorStats tcs(gen->counters()); TraceMemoryManagerStats tmms(gen->kind(),gc_cause()); @@ -434,6 +433,8 @@ return; // GC is disabled (e.g. JNI GetXXXCritical operation) } + GCIdMark gc_id_mark; + const bool do_clear_all_soft_refs = clear_all_soft_refs || collector_policy()->should_clear_all_soft_refs(); @@ -449,9 +450,7 @@ bool complete = full && (max_generation == OldGen); const char* gc_cause_prefix = complete ? "Full GC" : "GC"; TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later - // so we can assume here that the next GC id is what we want. - GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek()); + GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL); gc_prologue(complete); increment_total_collections(complete); @@ -489,6 +488,7 @@ bool must_restore_marks_for_biased_locking = false; if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) { + GCIdMark gc_id_mark; if (!complete) { // The full_collections increment was missed above. 
increment_total_full_collections(); diff --git a/src/share/vm/gc/shared/objectCountEventSender.cpp b/src/share/vm/gc/shared/objectCountEventSender.cpp --- a/src/share/vm/gc/shared/objectCountEventSender.cpp +++ b/src/share/vm/gc/shared/objectCountEventSender.cpp @@ -33,13 +33,13 @@ #include "utilities/ticks.hpp" #if INCLUDE_SERVICES -void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) { +void ObjectCountEventSender::send(const KlassInfoEntry* entry, const Ticks& timestamp) { #if INCLUDE_TRACE assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId), "Only call this method if the event is enabled"); EventObjectCountAfterGC event(UNTIMED); - event.set_gcId(gc_id.id()); + event.set_gcId(GCId::current()); event.set_class(entry->klass()); event.set_count(entry->count()); event.set_totalSize(entry->words() * BytesPerWord); diff --git a/src/share/vm/gc/shared/objectCountEventSender.hpp b/src/share/vm/gc/shared/objectCountEventSender.hpp --- a/src/share/vm/gc/shared/objectCountEventSender.hpp +++ b/src/share/vm/gc/shared/objectCountEventSender.hpp @@ -36,7 +36,7 @@ class ObjectCountEventSender : public AllStatic { public: - static void send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp); + static void send(const KlassInfoEntry* entry, const Ticks& timestamp); static bool should_send_event(); }; diff --git a/src/share/vm/gc/shared/referenceProcessor.cpp b/src/share/vm/gc/shared/referenceProcessor.cpp --- a/src/share/vm/gc/shared/referenceProcessor.cpp +++ b/src/share/vm/gc/shared/referenceProcessor.cpp @@ -187,8 +187,7 @@ OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, - GCTimer* gc_timer, - GCId gc_id) { + GCTimer* gc_timer) { assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); // Stop treating discovered references specially. @@ -209,7 +208,7 @@ // Soft references size_t soft_count = 0; { - GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id); + GCTraceTime tt("SoftReference", trace_time, false, gc_timer); soft_count = process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, is_alive, keep_alive, complete_gc, task_executor); @@ -220,7 +219,7 @@ // Weak references size_t weak_count = 0; { - GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id); + GCTraceTime tt("WeakReference", trace_time, false, gc_timer); weak_count = process_discovered_reflist(_discoveredWeakRefs, NULL, true, is_alive, keep_alive, complete_gc, task_executor); @@ -229,7 +228,7 @@ // Final references size_t final_count = 0; { - GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id); + GCTraceTime tt("FinalReference", trace_time, false, gc_timer); final_count = process_discovered_reflist(_discoveredFinalRefs, NULL, false, is_alive, keep_alive, complete_gc, task_executor); @@ -238,7 +237,7 @@ // Phantom references size_t phantom_count = 0; { - GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id); + GCTraceTime tt("PhantomReference", trace_time, false, gc_timer); phantom_count = process_discovered_reflist(_discoveredPhantomRefs, NULL, false, is_alive, keep_alive, complete_gc, task_executor); @@ -261,7 +260,7 @@ // resurrect a "post-mortem" object. 
size_t jni_weak_ref_count = 0; { - GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id); + GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer); if (task_executor != NULL) { task_executor->set_single_threaded_mode(); } @@ -1156,13 +1155,12 @@ OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield, - GCTimer* gc_timer, - GCId gc_id) { + GCTimer* gc_timer) { // Soft references { GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer, gc_id); + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1175,7 +1173,7 @@ // Weak references { GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer, gc_id); + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1188,7 +1186,7 @@ // Final references { GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer, gc_id); + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1201,7 +1199,7 @@ // Phantom references { GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer, gc_id); + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; diff --git a/src/share/vm/gc/shared/referenceProcessor.hpp b/src/share/vm/gc/shared/referenceProcessor.hpp --- a/src/share/vm/gc/shared/referenceProcessor.hpp +++ b/src/share/vm/gc/shared/referenceProcessor.hpp @@ -331,8 +331,7 @@ OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield, - GCTimer* gc_timer, - GCId gc_id); + GCTimer* gc_timer); // Returns the name of the discovered reference list // occupying the i / _num_q slot. @@ -441,8 +440,7 @@ OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, - GCTimer *gc_timer, - GCId gc_id); + GCTimer *gc_timer); // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); diff --git a/src/share/vm/gc/shared/workgroup.cpp b/src/share/vm/gc/shared/workgroup.cpp --- a/src/share/vm/gc/shared/workgroup.cpp +++ b/src/share/vm/gc/shared/workgroup.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc/shared/gcId.hpp" #include "gc/shared/workgroup.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" @@ -328,6 +329,7 @@ void GangWorker::run_task(WorkData data) { print_task_started(data); + GCIdMark gc_id_mark(data._task->gc_id()); data._task->work(data._worker_id); print_task_done(data); diff --git a/src/share/vm/gc/shared/workgroup.hpp b/src/share/vm/gc/shared/workgroup.hpp --- a/src/share/vm/gc/shared/workgroup.hpp +++ b/src/share/vm/gc/shared/workgroup.hpp @@ -28,6 +28,7 @@ #include "memory/allocation.hpp" #include "runtime/globals.hpp" #include "runtime/thread.hpp" +#include "gc/shared/gcId.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -54,9 +55,13 @@ // You subclass this to supply your own work() method class AbstractGangTask VALUE_OBJ_CLASS_SPEC { const char* _name; + const uint _gc_id; public: - AbstractGangTask(const char* name) : _name(name) {} + AbstractGangTask(const char* name) : + _name(name), + _gc_id(GCId::current_raw()) // Use current_raw() here since the G1ParVerifyTask can be called outside of a GC (at VM exit) + {} // The abstract work method. 
// The argument tells you which member of the gang you are. @@ -64,6 +69,7 @@ // Debugging accessor for the name. const char* name() const { return _name; } + const uint gc_id() const { return _gc_id; } }; struct WorkData { diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp +++ b/src/share/vm/runtime/thread.cpp @@ -31,6 +31,7 @@ #include "code/codeCacheExtensions.hpp" #include "code/scopeDesc.hpp" #include "compiler/compileBroker.hpp" +#include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/workgroup.hpp" #include "interpreter/interpreter.hpp" @@ -219,6 +220,7 @@ NOT_PRODUCT(_skip_gcalot = false;) _jvmti_env_iteration_count = 0; set_allocated_bytes(0); + _gc_id = GCId::undefined(); _vm_operation_started_count = 0; _vm_operation_completed_count = 0; _current_pending_monitor = NULL; diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp +++ b/src/share/vm/runtime/thread.hpp @@ -266,6 +266,7 @@ ThreadLocalAllocBuffer _tlab; // Thread-local eden jlong _allocated_bytes; // Cumulative number of bytes allocated on // the Java heap + uint _gc_id; // The current GC id when a thread takes part in GC TRACE_DATA _trace_data; // Thread-local data for tracing @@ -425,6 +426,9 @@ void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } inline jlong cooked_allocated_bytes(); + void set_gc_id(uint gc_id) { _gc_id = gc_id; } + uint gc_id() { return _gc_id; } + TRACE_DATA* trace_data() { return &_trace_data; } const ThreadExt& ext() const { return _ext; } diff --git a/src/share/vm/utilities/ostream.cpp b/src/share/vm/utilities/ostream.cpp --- a/src/share/vm/utilities/ostream.cpp +++ b/src/share/vm/utilities/ostream.cpp @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "compiler/compileLog.hpp" +#include "gc/shared/gcId.hpp" #include "oops/oop.inline.hpp" #include "runtime/arguments.hpp" #include "runtime/os.hpp" @@ -238,11 +239,11 @@ return; } -void outputStream::gclog_stamp(const GCId& gc_id) { +void outputStream::gclog_stamp() { date_stamp(PrintGCDateStamps); stamp(PrintGCTimeStamps); if (PrintGCID) { - print("#%u: ", gc_id.id()); + print("#%u: ", GCId::current()); } } diff --git a/src/share/vm/utilities/ostream.hpp b/src/share/vm/utilities/ostream.hpp --- a/src/share/vm/utilities/ostream.hpp +++ b/src/share/vm/utilities/ostream.hpp @@ -108,7 +108,7 @@ void date_stamp(bool guard) { date_stamp(guard, "", ": "); } - void gclog_stamp(const GCId& gc_id); + void gclog_stamp(); // portable printing of 64 bit integers void print_jlong(jlong value); # HG changeset patch # User brutisso # Date 1441807806 -7200 # Wed Sep 09 16:10:06 2015 +0200 # Node ID dc907545d688258821199a07dcedb977a09b50e0 # Parent c7f4ce4d186866e5d4286bdf4b06e79c598e459d [mq]: webrev.02 diff --git a/src/share/vm/gc/cms/vmCMSOperations.cpp b/src/share/vm/gc/cms/vmCMSOperations.cpp --- a/src/share/vm/gc/cms/vmCMSOperations.cpp +++ b/src/share/vm/gc/cms/vmCMSOperations.cpp @@ -134,7 +134,6 @@ return; } HS_PRIVATE_CMS_INITMARK_BEGIN(); - GCIdMark gc_id_mark(_gc_id); _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark"); @@ -162,7 +161,6 @@ return; } HS_PRIVATE_CMS_REMARK_BEGIN(); - GCIdMark gc_id_mark(_gc_id); _collector->_gc_timer_cm->register_gc_pause_start("Final Mark"); diff --git a/src/share/vm/gc/cms/vmCMSOperations.hpp b/src/share/vm/gc/cms/vmCMSOperations.hpp --- a/src/share/vm/gc/cms/vmCMSOperations.hpp +++
b/src/share/vm/gc/cms/vmCMSOperations.hpp @@ -27,7 +27,6 @@ #include "gc/cms/concurrentMarkSweepGeneration.hpp" #include "gc/shared/gcCause.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/vmGCOperations.hpp" #include "runtime/vm_operations.hpp" @@ -54,7 +53,6 @@ protected: CMSCollector* _collector; // associated collector bool _prologue_succeeded; // whether doit_prologue succeeded - uint _gc_id; bool lost_race() const; @@ -65,8 +63,7 @@ public: VM_CMS_Operation(CMSCollector* collector): _collector(collector), - _prologue_succeeded(false), - _gc_id(GCId::current()) {} + _prologue_succeeded(false) {} ~VM_CMS_Operation() {} // The legal collector state for executing this CMS op. diff --git a/src/share/vm/gc/cms/yieldingWorkgroup.cpp b/src/share/vm/gc/cms/yieldingWorkgroup.cpp --- a/src/share/vm/gc/cms/yieldingWorkgroup.cpp +++ b/src/share/vm/gc/cms/yieldingWorkgroup.cpp @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "gc/cms/yieldingWorkgroup.hpp" -#include "gc/shared/gcId.hpp" #include "utilities/macros.hpp" YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id) @@ -341,7 +340,6 @@ // Now, release the gang mutex and do the work. { MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag); - GCIdMark gc_id_mark(data.task()->gc_id()); data.task()->work(id); // This might include yielding } // Reacquire monitor and note completion of this worker diff --git a/src/share/vm/gc/g1/concurrentMark.cpp b/src/share/vm/gc/g1/concurrentMark.cpp --- a/src/share/vm/gc/g1/concurrentMark.cpp +++ b/src/share/vm/gc/g1/concurrentMark.cpp @@ -41,7 +41,6 @@ #include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/heapRegionSet.inline.hpp" #include "gc/g1/suspendibleThreadSet.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.hpp" diff --git a/src/share/vm/gc/g1/concurrentMark.hpp b/src/share/vm/gc/g1/concurrentMark.hpp --- a/src/share/vm/gc/g1/concurrentMark.hpp +++ b/src/share/vm/gc/g1/concurrentMark.hpp @@ -28,7 +28,6 @@ #include "classfile/javaClasses.hpp" #include "gc/g1/g1RegionToSpaceMapper.hpp" #include "gc/g1/heapRegionSet.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/taskqueue.hpp" class G1CollectedHeap; diff --git a/src/share/vm/gc/g1/concurrentMarkThread.cpp b/src/share/vm/gc/g1/concurrentMarkThread.cpp --- a/src/share/vm/gc/g1/concurrentMarkThread.cpp +++ b/src/share/vm/gc/g1/concurrentMarkThread.cpp @@ -110,6 +110,7 @@ } GCIdMark gc_id_mark; + { ResourceMark rm; HandleMark hm; diff --git a/src/share/vm/gc/g1/g1CollectedHeap.cpp b/src/share/vm/gc/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp @@ -3992,360 +3992,364 @@ _gc_timer_stw->register_gc_start(); - GCIdMark gc_id_mark; - _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); SvcGCMarker sgcm(SvcGCMarker::MINOR); ResourceMark rm; wait_for_root_region_scanning(); - G1Log::update_level(); - print_heap_before_gc(); - trace_heap_before_gc(_gc_tracer_stw); - - verify_region_sets_optional(); - verify_dirty_young_regions(); - - // This call will decide whether this pause is an initial-mark - // pause. If it is, during_initial_mark_pause() will return true - // for the duration of this pause. - g1_policy()->decide_on_conc_mark_initiation(); - - // We do not allow initial-mark to be piggy-backed on a mixed GC. 
- assert(!collector_state()->during_initial_mark_pause() || - collector_state()->gcs_are_young(), "sanity"); - - // We also do not allow mixed GCs during marking. - assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity"); - - // Record whether this pause is an initial mark. When the current - // thread has completed its logging output and it's safe to signal - // the CM thread, the flag's value in the policy has been reset. - bool should_start_conc_mark = collector_state()->during_initial_mark_pause(); - - // Inner scope for scope based logging, timers, and stats collection + bool should_start_conc_mark = false; { - EvacuationInfo evacuation_info; - - if (collector_state()->during_initial_mark_pause()) { - // We are about to start a marking cycle, so we increment the - // full collection counter. - increment_old_marking_cycles_started(); - register_concurrent_cycle_start(_gc_timer_stw->gc_start()); + GCIdMark gc_id_mark; + _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); + + G1Log::update_level(); + print_heap_before_gc(); + trace_heap_before_gc(_gc_tracer_stw); + + verify_region_sets_optional(); + verify_dirty_young_regions(); + + // This call will decide whether this pause is an initial-mark + // pause. If it is, during_initial_mark_pause() will return true + // for the duration of this pause. + g1_policy()->decide_on_conc_mark_initiation(); + + // We do not allow initial-mark to be piggy-backed on a mixed GC. + assert(!collector_state()->during_initial_mark_pause() || + collector_state()->gcs_are_young(), "sanity"); + + // We also do not allow mixed GCs during marking. + assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity"); + + // Record whether this pause is an initial mark. When the current + // thread has completed its logging output and it's safe to signal + // the CM thread, the flag's value in the policy has been reset. + should_start_conc_mark = collector_state()->during_initial_mark_pause(); + + // Inner scope for scope based logging, timers, and stats collection + { + EvacuationInfo evacuation_info; + + if (collector_state()->during_initial_mark_pause()) { + // We are about to start a marking cycle, so we increment the + // full collection counter. + increment_old_marking_cycles_started(); + register_concurrent_cycle_start(_gc_timer_stw->gc_start()); + } + + _gc_tracer_stw->report_yc_type(collector_state()->yc_type()); + + TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); + + uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), + workers()->active_workers(), + Threads::number_of_non_daemon_threads()); + workers()->set_active_workers(active_workers); + + double pause_start_sec = os::elapsedTime(); + g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress()); + log_gc_header(); + + TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); + TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); + + // If the secondary_free_list is not empty, append it to the + // free_list. No need to wait for the cleanup operation to finish; + // the region allocation code will check the secondary_free_list + // and wait if necessary. If the G1StressConcRegionFreeing flag is + // set, skip this step so that the region allocation code has to + // get entries from the secondary_free_list.
+ if (!G1StressConcRegionFreeing) { + append_secondary_free_list_if_not_empty_with_lock(); + } + + assert(check_young_list_well_formed(), "young list should be well formed"); + + // Don't dynamically change the number of GC threads this early. A value of + // 0 is used to indicate serial work. When parallel work is done, + // it will be set. + + { // Call to jvmpi::post_class_unload_events must occur outside of active GC + IsGCActiveMark x; + + gc_prologue(false); + increment_total_collections(false /* full gc */); + increment_gc_time_stamp(); + + verify_before_gc(); + + check_bitmaps("GC Start"); + + COMPILER2_PRESENT(DerivedPointerTable::clear()); + + // Please see comment in g1CollectedHeap.hpp and + // G1CollectedHeap::ref_processing_init() to see how + // reference processing currently works in G1. + + // Enable discovery in the STW reference processor + ref_processor_stw()->enable_discovery(); + + { + // We want to temporarily turn off discovery by the + // CM ref processor, if necessary, and turn it back on + // on again later if we do. Using a scoped + // NoRefDiscovery object will do this. + NoRefDiscovery no_cm_discovery(ref_processor_cm()); + + // Forget the current alloc region (we might even choose it to be part + // of the collection set!). + _allocator->release_mutator_alloc_region(); + + // We should call this after we retire the mutator alloc + // region(s) so that all the ALLOC / RETIRE events are generated + // before the start GC event. + _hr_printer.start_gc(false /* full */, (size_t) total_collections()); + + // This timing is only used by the ergonomics to handle our pause target. + // It is unclear why this should not include the full pause. We will + // investigate this in CR 7178365. + // + // Preserving the old comment here if that helps the investigation: + // + // The elapsed time induced by the start time below deliberately elides + // the possible verification above. + double sample_start_time_sec = os::elapsedTime(); + +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); +#endif // YOUNG_LIST_VERBOSE + + g1_policy()->record_collection_pause_start(sample_start_time_sec); + +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); + _young_list->print(); +#endif // YOUNG_LIST_VERBOSE + + if (collector_state()->during_initial_mark_pause()) { + concurrent_mark()->checkpointRootsInitialPre(); + } + +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); +#endif // YOUNG_LIST_VERBOSE + + g1_policy()->finalize_cset(target_pause_time_ms); + + evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length()); + + register_humongous_regions_with_cset(); + + assert(check_cset_fast_test(), "Inconsistency in the InCSetState table."); + + _cm->note_start_of_gc(); + // We call this after finalize_cset() to + // ensure that the CSet has been finalized. + _cm->verify_no_cset_oops(); + + if (_hr_printer.is_active()) { + HeapRegion* hr = g1_policy()->collection_set(); + while (hr != NULL) { + _hr_printer.cset(hr); + hr = hr->next_in_collection_set(); + } + } + +#ifdef ASSERT + VerifyCSetClosure cl; + collection_set_iterate(&cl); +#endif // ASSERT + + setup_surviving_young_words(); + + // Initialize the GC alloc regions. 
+ _allocator->init_gc_alloc_regions(evacuation_info); + + G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers()); + // Actually do the work... + evacuate_collection_set(evacuation_info, &per_thread_states); + + free_collection_set(g1_policy()->collection_set(), evacuation_info); + + eagerly_reclaim_humongous_regions(); + + g1_policy()->clear_collection_set(); + + cleanup_surviving_young_words(); + + // Start a new incremental collection set for the next pause. + g1_policy()->start_incremental_cset_building(); + + clear_cset_fast_test(); + + _young_list->reset_sampled_info(); + + // Don't check the whole heap at this point as the + // GC alloc regions from this pause have been tagged + // as survivors and moved on to the survivor list. + // Survivor regions will fail the !is_young() check. + assert(check_young_list_empty(false /* check_heap */), + "young list should be empty"); + +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); + _young_list->print(); +#endif // YOUNG_LIST_VERBOSE + + g1_policy()->record_survivor_regions(_young_list->survivor_length(), + _young_list->first_survivor_region(), + _young_list->last_survivor_region()); + + _young_list->reset_auxilary_lists(); + + if (evacuation_failed()) { + set_used(recalculate_used()); + if (_archive_allocator != NULL) { + _archive_allocator->clear_used(); + } + for (uint i = 0; i < ParallelGCThreads; i++) { + if (_evacuation_failed_info_array[i].has_failed()) { + _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); + } + } + } else { + // The "used" of the the collection set have already been subtracted + // when they were freed. Add in the bytes evacuated. + increase_used(g1_policy()->bytes_copied_during_gc()); + } + + if (collector_state()->during_initial_mark_pause()) { + // We have to do this before we notify the CM threads that + // they can start working to make sure that all the + // appropriate initialization is done on the CM object. + concurrent_mark()->checkpointRootsInitialPost(); + collector_state()->set_mark_in_progress(true); + // Note that we don't actually trigger the CM thread at + // this point. We do that later when we're sure that + // the current thread has completed its logging output. + } + + allocate_dummy_regions(); + +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); +#endif // YOUNG_LIST_VERBOSE + + _allocator->init_mutator_alloc_region(); + + { + size_t expand_bytes = g1_policy()->expansion_amount(); + if (expand_bytes > 0) { + size_t bytes_before = capacity(); + // No need for an ergo verbose message here, + // expansion_amount() does this when it returns a value > 0. + if (!expand(expand_bytes)) { + // We failed to expand the heap. Cannot do anything about it. + } + } + } + + // We redo the verification but now wrt to the new CSet which + // has just got initialized after the previous CSet was freed. + _cm->verify_no_cset_oops(); + _cm->note_end_of_gc(); + + // This timing is only used by the ergonomics to handle our pause target. + // It is unclear why this should not include the full pause. We will + // investigate this in CR 7178365. 
+ double sample_end_time_sec = os::elapsedTime(); + double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; + g1_policy()->record_collection_pause_end(pause_time_ms); + + evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before()); + evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc()); + + MemoryService::track_memory_usage(); + + // In prepare_for_verify() below we'll need to scan the deferred + // update buffers to bring the RSets up-to-date if + // G1HRRSFlushLogBuffersOnVerify has been set. While scanning + // the update buffers we'll probably need to scan cards on the + // regions we just allocated to (i.e., the GC alloc + // regions). However, during the last GC we called + // set_saved_mark() on all the GC alloc regions, so card + // scanning might skip the [saved_mark_word()...top()] area of + // those regions (i.e., the area we allocated objects into + // during the last GC). But it shouldn't. Given that + // saved_mark_word() is conditional on whether the GC time stamp + // on the region is current or not, by incrementing the GC time + // stamp here we invalidate all the GC time stamps on all the + // regions and saved_mark_word() will simply return top() for + // all the regions. This is a nicer way of ensuring this rather + // than iterating over the regions and fixing them. In fact, the + // GC time stamp increment here also ensures that + // saved_mark_word() will return top() between pauses, i.e., + // during concurrent refinement. So we don't need the + // is_gc_active() check to decided which top to use when + // scanning cards (see CR 7039627). + increment_gc_time_stamp(); + + verify_after_gc(); + check_bitmaps("GC End"); + + assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); + ref_processor_stw()->verify_no_references_recorded(); + + // CM reference discovery will be re-enabled if necessary. + } + + // We should do this after we potentially expand the heap so + // that all the COMMIT events are generated before the end GC + // event, and after we retire the GC alloc regions so that all + // RETIRE events are generated before the end GC event. + _hr_printer.end_gc(false /* full */, (size_t) total_collections()); + +#ifdef TRACESPINNING + ParallelTaskTerminator::print_termination_counts(); +#endif + + gc_epilogue(false); + } + + // Print the remainder of the GC log output. + log_gc_footer(os::elapsedTime() - pause_start_sec); + + // It is not yet to safe to tell the concurrent mark to + // start as we have some optional output below. We don't want the + // output from the concurrent mark thread interfering with this + // logging output either. + + _hrm.verify_optional(); + verify_region_sets_optional(); + + TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats()); + TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); + + print_heap_after_gc(); + trace_heap_after_gc(_gc_tracer_stw); + + // We must call G1MonitoringSupport::update_sizes() in the same scoping level + // as an active TraceMemoryManagerStats object (i.e. before the destructor for the + // TraceMemoryManagerStats is called) so that the G1 memory pools are updated + // before any GC notifications are raised. 
+ g1mm()->update_sizes(); + + _gc_tracer_stw->report_evacuation_info(&evacuation_info); + _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); + _gc_timer_stw->register_gc_end(); + _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); } - - _gc_tracer_stw->report_yc_type(collector_state()->yc_type()); - - TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - - uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), - workers()->active_workers(), - Threads::number_of_non_daemon_threads()); - workers()->set_active_workers(active_workers); - - double pause_start_sec = os::elapsedTime(); - g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress()); - log_gc_header(); - - TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); - TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); - - // If the secondary_free_list is not empty, append it to the - // free_list. No need to wait for the cleanup operation to finish; - // the region allocation code will check the secondary_free_list - // and wait if necessary. If the G1StressConcRegionFreeing flag is - // set, skip this step so that the region allocation code has to - // get entries from the secondary_free_list. - if (!G1StressConcRegionFreeing) { - append_secondary_free_list_if_not_empty_with_lock(); - } - - assert(check_young_list_well_formed(), "young list should be well formed"); - - // Don't dynamically change the number of GC threads this early. A value of - // 0 is used to indicate serial work. When parallel work is done, - // it will be set. - - { // Call to jvmpi::post_class_unload_events must occur outside of active GC - IsGCActiveMark x; - - gc_prologue(false); - increment_total_collections(false /* full gc */); - increment_gc_time_stamp(); - - verify_before_gc(); - - check_bitmaps("GC Start"); - - COMPILER2_PRESENT(DerivedPointerTable::clear()); - - // Please see comment in g1CollectedHeap.hpp and - // G1CollectedHeap::ref_processing_init() to see how - // reference processing currently works in G1. - - // Enable discovery in the STW reference processor - ref_processor_stw()->enable_discovery(); - - { - // We want to temporarily turn off discovery by the - // CM ref processor, if necessary, and turn it back on - // on again later if we do. Using a scoped - // NoRefDiscovery object will do this. - NoRefDiscovery no_cm_discovery(ref_processor_cm()); - - // Forget the current alloc region (we might even choose it to be part - // of the collection set!). - _allocator->release_mutator_alloc_region(); - - // We should call this after we retire the mutator alloc - // region(s) so that all the ALLOC / RETIRE events are generated - // before the start GC event. - _hr_printer.start_gc(false /* full */, (size_t) total_collections()); - - // This timing is only used by the ergonomics to handle our pause target. - // It is unclear why this should not include the full pause. We will - // investigate this in CR 7178365. - // - // Preserving the old comment here if that helps the investigation: - // - // The elapsed time induced by the start time below deliberately elides - // the possible verification above. 
- double sample_start_time_sec = os::elapsedTime(); - -#if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); - _young_list->print(); - g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); -#endif // YOUNG_LIST_VERBOSE - - g1_policy()->record_collection_pause_start(sample_start_time_sec); - -#if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); - _young_list->print(); -#endif // YOUNG_LIST_VERBOSE - - if (collector_state()->during_initial_mark_pause()) { - concurrent_mark()->checkpointRootsInitialPre(); - } - -#if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); - _young_list->print(); - g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); -#endif // YOUNG_LIST_VERBOSE - - g1_policy()->finalize_cset(target_pause_time_ms); - - evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length()); - - register_humongous_regions_with_cset(); - - assert(check_cset_fast_test(), "Inconsistency in the InCSetState table."); - - _cm->note_start_of_gc(); - // We call this after finalize_cset() to - // ensure that the CSet has been finalized. - _cm->verify_no_cset_oops(); - - if (_hr_printer.is_active()) { - HeapRegion* hr = g1_policy()->collection_set(); - while (hr != NULL) { - _hr_printer.cset(hr); - hr = hr->next_in_collection_set(); - } - } - -#ifdef ASSERT - VerifyCSetClosure cl; - collection_set_iterate(&cl); -#endif // ASSERT - - setup_surviving_young_words(); - - // Initialize the GC alloc regions. - _allocator->init_gc_alloc_regions(evacuation_info); - - G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers()); - // Actually do the work... - evacuate_collection_set(evacuation_info, &per_thread_states); - - free_collection_set(g1_policy()->collection_set(), evacuation_info); - - eagerly_reclaim_humongous_regions(); - - g1_policy()->clear_collection_set(); - - cleanup_surviving_young_words(); - - // Start a new incremental collection set for the next pause. - g1_policy()->start_incremental_cset_building(); - - clear_cset_fast_test(); - - _young_list->reset_sampled_info(); - - // Don't check the whole heap at this point as the - // GC alloc regions from this pause have been tagged - // as survivors and moved on to the survivor list. - // Survivor regions will fail the !is_young() check. - assert(check_young_list_empty(false /* check_heap */), - "young list should be empty"); - -#if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); - _young_list->print(); -#endif // YOUNG_LIST_VERBOSE - - g1_policy()->record_survivor_regions(_young_list->survivor_length(), - _young_list->first_survivor_region(), - _young_list->last_survivor_region()); - - _young_list->reset_auxilary_lists(); - - if (evacuation_failed()) { - set_used(recalculate_used()); - if (_archive_allocator != NULL) { - _archive_allocator->clear_used(); - } - for (uint i = 0; i < ParallelGCThreads; i++) { - if (_evacuation_failed_info_array[i].has_failed()) { - _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); - } - } - } else { - // The "used" of the the collection set have already been subtracted - // when they were freed. Add in the bytes evacuated. 
- increase_used(g1_policy()->bytes_copied_during_gc()); - } - - if (collector_state()->during_initial_mark_pause()) { - // We have to do this before we notify the CM threads that - // they can start working to make sure that all the - // appropriate initialization is done on the CM object. - concurrent_mark()->checkpointRootsInitialPost(); - collector_state()->set_mark_in_progress(true); - // Note that we don't actually trigger the CM thread at - // this point. We do that later when we're sure that - // the current thread has completed its logging output. - } - - allocate_dummy_regions(); - -#if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); - _young_list->print(); - g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); -#endif // YOUNG_LIST_VERBOSE - - _allocator->init_mutator_alloc_region(); - - { - size_t expand_bytes = g1_policy()->expansion_amount(); - if (expand_bytes > 0) { - size_t bytes_before = capacity(); - // No need for an ergo verbose message here, - // expansion_amount() does this when it returns a value > 0. - if (!expand(expand_bytes)) { - // We failed to expand the heap. Cannot do anything about it. - } - } - } - - // We redo the verification but now wrt to the new CSet which - // has just got initialized after the previous CSet was freed. - _cm->verify_no_cset_oops(); - _cm->note_end_of_gc(); - - // This timing is only used by the ergonomics to handle our pause target. - // It is unclear why this should not include the full pause. We will - // investigate this in CR 7178365. - double sample_end_time_sec = os::elapsedTime(); - double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; - g1_policy()->record_collection_pause_end(pause_time_ms); - - evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before()); - evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc()); - - MemoryService::track_memory_usage(); - - // In prepare_for_verify() below we'll need to scan the deferred - // update buffers to bring the RSets up-to-date if - // G1HRRSFlushLogBuffersOnVerify has been set. While scanning - // the update buffers we'll probably need to scan cards on the - // regions we just allocated to (i.e., the GC alloc - // regions). However, during the last GC we called - // set_saved_mark() on all the GC alloc regions, so card - // scanning might skip the [saved_mark_word()...top()] area of - // those regions (i.e., the area we allocated objects into - // during the last GC). But it shouldn't. Given that - // saved_mark_word() is conditional on whether the GC time stamp - // on the region is current or not, by incrementing the GC time - // stamp here we invalidate all the GC time stamps on all the - // regions and saved_mark_word() will simply return top() for - // all the regions. This is a nicer way of ensuring this rather - // than iterating over the regions and fixing them. In fact, the - // GC time stamp increment here also ensures that - // saved_mark_word() will return top() between pauses, i.e., - // during concurrent refinement. So we don't need the - // is_gc_active() check to decided which top to use when - // scanning cards (see CR 7039627). - increment_gc_time_stamp(); - - verify_after_gc(); - check_bitmaps("GC End"); - - assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); - ref_processor_stw()->verify_no_references_recorded(); - - // CM reference discovery will be re-enabled if necessary. 
- } - - // We should do this after we potentially expand the heap so - // that all the COMMIT events are generated before the end GC - // event, and after we retire the GC alloc regions so that all - // RETIRE events are generated before the end GC event. - _hr_printer.end_gc(false /* full */, (size_t) total_collections()); - -#ifdef TRACESPINNING - ParallelTaskTerminator::print_termination_counts(); -#endif - - gc_epilogue(false); - } - - // Print the remainder of the GC log output. - log_gc_footer(os::elapsedTime() - pause_start_sec); - - // It is not yet to safe to tell the concurrent mark to - // start as we have some optional output below. We don't want the - // output from the concurrent mark thread interfering with this - // logging output either. - - _hrm.verify_optional(); - verify_region_sets_optional(); - - TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats()); - TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); - - print_heap_after_gc(); - trace_heap_after_gc(_gc_tracer_stw); - - // We must call G1MonitoringSupport::update_sizes() in the same scoping level - // as an active TraceMemoryManagerStats object (i.e. before the destructor for the - // TraceMemoryManagerStats is called) so that the G1 memory pools are updated - // before any GC notifications are raised. - g1mm()->update_sizes(); - - _gc_tracer_stw->report_evacuation_info(&evacuation_info); - _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); - _gc_timer_stw->register_gc_end(); - _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); - } - // It should now be safe to tell the concurrent mark thread to start - // without its logging output interfering with the logging output - // that came from the pause. + // It should now be safe to tell the concurrent mark thread to start + // without its logging output interfering with the logging output + // that came from the pause. + } if (should_start_conc_mark) { // CAUTION: after the doConcurrentMark() call below, diff --git a/src/share/vm/gc/g1/vm_operations_g1.cpp b/src/share/vm/gc/g1/vm_operations_g1.cpp --- a/src/share/vm/gc/g1/vm_operations_g1.cpp +++ b/src/share/vm/gc/g1/vm_operations_g1.cpp @@ -26,7 +26,6 @@ #include "gc/g1/concurrentMarkThread.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" -#include "gc/shared/gcId.hpp" #include "gc/g1/g1Log.hpp" #include "gc/g1/vm_operations_g1.hpp" #include "gc/shared/gcTimer.hpp" @@ -228,7 +227,6 @@ void VM_CGC_Operation::doit() { TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCIdMark gc_id_mark(_gc_id); GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm()); IsGCActiveMark x; _cl->do_void(); diff --git a/src/share/vm/gc/g1/vm_operations_g1.hpp b/src/share/vm/gc/g1/vm_operations_g1.hpp --- a/src/share/vm/gc/g1/vm_operations_g1.hpp +++ b/src/share/vm/gc/g1/vm_operations_g1.hpp @@ -26,7 +26,6 @@ #define SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP #include "gc/g1/g1AllocationContext.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/vmGCOperations.hpp" // VM_operations for the G1 collector. 
@@ -105,7 +104,6 @@ VoidClosure* _cl; const char* _printGCMessage; bool _needs_pll; - uint _gc_id; protected: // java.lang.ref.Reference support @@ -114,7 +112,7 @@ public: VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll) - : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll), _gc_id(GCId::current()) { } + : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { } virtual VMOp_Type type() const { return VMOp_CGC_Operation; } virtual void doit(); virtual bool doit_prologue(); diff --git a/src/share/vm/gc/shared/gcId.cpp b/src/share/vm/gc/shared/gcId.cpp --- a/src/share/vm/gc/shared/gcId.cpp +++ b/src/share/vm/gc/shared/gcId.cpp @@ -24,38 +24,28 @@ #include "precompiled.hpp" #include "gc/shared/gcId.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/thread.inline.hpp" uint GCId::_next_id = 0; - -NamedThread* currentNamedthread() { - assert(Thread::current()->is_Named_thread(), "This thread must be NamedThread"); - return (NamedThread*)Thread::current(); -} +uint GCId::_current_id = UNDEFINED; const uint GCId::create() { return _next_id++; } const uint GCId::current() { - assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id."); - return current_raw(); + assert(_current_id != UNDEFINED, "Using undefined GC ID"); + return _current_id; } -const uint GCId::current_raw() { - return currentNamedthread()->gc_id(); -} - -GCIdMark::GCIdMark() : _gc_id(GCId::create()) { - currentNamedthread()->set_gc_id(_gc_id); -} - -GCIdMark::GCIdMark(uint gc_id) : _gc_id(gc_id) { - currentNamedthread()->set_gc_id(_gc_id); +GCIdMark::GCIdMark() { + _previous_gc_id = GCId::_current_id; + uint gc_id = GCId::create(); + GCId::set_current(gc_id); + DEBUG_ONLY(_gc_id = gc_id;) } GCIdMark::~GCIdMark() { - currentNamedthread()->set_gc_id(GCId::undefined()); + assert(_gc_id == GCId::_current_id, err_msg("GCIdMarks for %u and %u overlap.", _gc_id, GCId::_current_id)); + GCId::set_current(_previous_gc_id); } diff --git a/src/share/vm/gc/shared/gcId.hpp b/src/share/vm/gc/shared/gcId.hpp --- a/src/share/vm/gc/shared/gcId.hpp +++ b/src/share/vm/gc/shared/gcId.hpp @@ -30,22 +30,20 @@ class GCId : public AllStatic { friend class GCIdMark; static uint _next_id; + static uint _current_id; static const uint UNDEFINED = (uint)-1; static const uint create(); + static void set_current(uint gc_id) { _current_id = gc_id; } public: - // Returns the currently active GC id. Asserts that there is an active GC id. 
static const uint current(); - // Same as current() but can return undefined() if no GC id is currently active - static const uint current_raw(); - static const uint undefined() { return UNDEFINED; } }; class GCIdMark { - uint _gc_id; + uint _previous_gc_id; + DEBUG_ONLY(uint _gc_id;) public: GCIdMark(); - GCIdMark(uint gc_id); ~GCIdMark(); }; diff --git a/src/share/vm/gc/shared/gcTraceTime.cpp b/src/share/vm/gc/shared/gcTraceTime.cpp --- a/src/share/vm/gc/shared/gcTraceTime.cpp +++ b/src/share/vm/gc/shared/gcTraceTime.cpp @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.hpp" diff --git a/src/share/vm/gc/shared/workgroup.cpp b/src/share/vm/gc/shared/workgroup.cpp --- a/src/share/vm/gc/shared/workgroup.cpp +++ b/src/share/vm/gc/shared/workgroup.cpp @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/workgroup.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" @@ -329,7 +328,6 @@ void GangWorker::run_task(WorkData data) { print_task_started(data); - GCIdMark gc_id_mark(data._task->gc_id()); data._task->work(data._worker_id); print_task_done(data); diff --git a/src/share/vm/gc/shared/workgroup.hpp b/src/share/vm/gc/shared/workgroup.hpp --- a/src/share/vm/gc/shared/workgroup.hpp +++ b/src/share/vm/gc/shared/workgroup.hpp @@ -28,7 +28,6 @@ #include "memory/allocation.hpp" #include "runtime/globals.hpp" #include "runtime/thread.hpp" -#include "gc/shared/gcId.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -55,13 +54,9 @@ // You subclass this to supply your own work() method class AbstractGangTask VALUE_OBJ_CLASS_SPEC { const char* _name; - const uint _gc_id; public: - AbstractGangTask(const char* name) : - _name(name), - _gc_id(GCId::current_raw()) // Use current_raw() here since the G1ParVerifyTask can be called outside of a GC (at VM exit) - {} + AbstractGangTask(const char* name) : _name(name) {} // The abstract work method. // The argument tells you which member of the gang you are. @@ -69,7 +64,6 @@ // Debugging accessor for the name. 
const char* name() const { return _name; } - const uint gc_id() const { return _gc_id; } }; struct WorkData { diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp +++ b/src/share/vm/runtime/thread.cpp @@ -31,7 +31,6 @@ #include "code/codeCacheExtensions.hpp" #include "code/scopeDesc.hpp" #include "compiler/compileBroker.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/workgroup.hpp" #include "interpreter/interpreter.hpp" @@ -220,7 +219,6 @@ NOT_PRODUCT(_skip_gcalot = false;) _jvmti_env_iteration_count = 0; set_allocated_bytes(0); - _gc_id = GCId::undefined(); _vm_operation_started_count = 0; _vm_operation_completed_count = 0; _current_pending_monitor = NULL; diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp +++ b/src/share/vm/runtime/thread.hpp @@ -266,7 +266,6 @@ ThreadLocalAllocBuffer _tlab; // Thread-local eden jlong _allocated_bytes; // Cumulative number of bytes allocated on // the Java heap - uint _gc_id; // The current GC id when a thread takes part in GC TRACE_DATA _trace_data; // Thread-local data for tracing @@ -426,9 +425,6 @@ void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } inline jlong cooked_allocated_bytes(); - void set_gc_id(uint gc_id) { _gc_id = gc_id; } - uint gc_id() { return _gc_id; } - TRACE_DATA* trace_data() { return &_trace_data; } const ThreadExt& ext() const { return _ext; } diff --git a/src/share/vm/utilities/ostream.hpp b/src/share/vm/utilities/ostream.hpp --- a/src/share/vm/utilities/ostream.hpp +++ b/src/share/vm/utilities/ostream.hpp @@ -28,7 +28,6 @@ #include "memory/allocation.hpp" #include "runtime/timer.hpp" -class GCId; DEBUG_ONLY(class ResourceMark;) // Output streams for printing
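The net effect of webrev.02 is easiest to see outside the diff: instead of each NamedThread carrying its own _gc_id (the webrev01 approach), there is a single process-wide current GC id, and GCIdMark becomes a stack object that installs a fresh id and restores the previous one when it goes out of scope. That save/restore behavior is what lets marks nest (for example the second GCIdMark that GenCollectedHeap::do_collection puts around the old generation collection inside an already-marked pause), and it is why the per-task id plumbing added in webrev01 (Thread::_gc_id, AbstractGangTask::gc_id(), the GCIdMark in GangWorker::run_task) can be reverted here. Below is a minimal standalone sketch of the pattern: the GCId/GCIdMark names mirror the patch, but the plain assert, the unsigned type, and the main() driver are illustrative simplifications, not HotSpot code.

#include <cassert>
#include <cstdio>

// Process-wide GC id state, modeled on gcId.hpp/gcId.cpp after webrev.02.
class GCId {
  friend class GCIdMark;
  static unsigned _next_id;
  static unsigned _current_id;
  static const unsigned UNDEFINED = (unsigned)-1;
  static unsigned create() { return _next_id++; }
  static void set_current(unsigned id) { _current_id = id; }
public:
  static unsigned current() {
    assert(_current_id != UNDEFINED && "Using undefined GC ID");
    return _current_id;
  }
};

unsigned GCId::_next_id = 0;
unsigned GCId::_current_id = (unsigned)-1; // starts out UNDEFINED

// RAII mark: installs a fresh id for its scope and restores the previous
// one on destruction, so nested marks unwind correctly.
class GCIdMark {
  unsigned _previous_gc_id;
public:
  GCIdMark() : _previous_gc_id(GCId::_current_id) {
    GCId::set_current(GCId::create());
  }
  ~GCIdMark() { GCId::set_current(_previous_gc_id); }
};

int main() {
  GCIdMark young_gc;                        // id 0 is now current
  std::printf("#%u\n", GCId::current());    // prints #0
  {
    GCIdMark full_gc;                       // nested collection gets id 1
    std::printf("#%u\n", GCId::current());  // prints #1
  }                                         // destructor restores id 0
  std::printf("#%u\n", GCId::current());    // prints #0 again
  return 0;
}

One thing the sketch glosses over: _current_id is a plain global, which the patch can presumably get away with because the id is only set at a safepoint or by the single concurrent GC thread that owns the enclosing GCIdMark; the DEBUG_ONLY overlap assert in the real destructor is there to catch violations of that assumption.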