--- old/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	2020-06-25 10:40:20.288778057 +0200
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	2020-06-25 10:40:19.888772079 +0200
@@ -42,6 +42,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/ticks.hpp"
 
 // ======= Concurrent Mark Thread ========
 
@@ -102,15 +103,13 @@
   double delay_end_sec = mmu_delay_end(g1_policy, remark);
   // Wait for timeout or thread termination request.
   MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag);
-  while (!_cm->has_aborted()) {
+  while (!_cm->has_aborted() && !should_terminate()) {
     double sleep_time_sec = (delay_end_sec - os::elapsedTime());
     jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS);
     if (sleep_time_ms <= 0) {
       break; // Passed end time.
     } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) {
       break; // Timeout => reached end time.
-    } else if (should_terminate()) {
-      break; // Wakeup for pending termination request.
     }
     // Other (possibly spurious) wakeup. Retry with updated sleep time.
   }
@@ -136,165 +135,191 @@
 void G1ConcurrentMarkThread::run_service() {
   _vtime_start = os::elapsedVTime();
 
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1Policy* policy = g1h->policy();
-
   while (!should_terminate()) {
     // wait until started is set.
-    sleep_before_next_cycle();
-    if (should_terminate()) {
+    if (wait_for_next_cycle()) {
       break;
     }
+    run_cycle();
+  }
+  _cm->root_regions()->cancel_scan();
+}
+
+void G1ConcurrentMarkThread::stop_service() {
+  MutexLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+  CGC_lock->notify_all();
+}
 
-    GCIdMark gc_id_mark;
+bool G1ConcurrentMarkThread::wait_for_next_cycle() {
+  assert(!in_progress(), "should have been cleared");
 
-    _cm->concurrent_cycle_start();
+  MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+  while (!started() && !should_terminate()) {
+    ml.wait();
+  }
 
-    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
-    {
-      ResourceMark rm;
-      HandleMark hm;
-      double cycle_start = os::elapsedVTime();
+  if (started()) {
+    set_in_progress();
+  }
 
-      {
+  return should_terminate();
+}
+
+void G1ConcurrentMarkThread::run_cycle() {
+  enum ConcurrentCyclePhase {
+    CycleStart,
+    CLDClearClaimedMarks,
+    ScanRootRegions,
+    MarkFromRoots,
+    Preclean,
+    DelayToKeepMMUBeforeRemark,
+    PauseRemark,
+    RebuildRememberedSets,
+    DelayToKeepMMUBeforeCleanup,
+    PauseCleanup,
+    CleanupForNextMark,
+    CycleDone
+  };
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1Policy* policy = g1h->policy();
+
+  ConcurrentCyclePhase cur_state = CycleStart;
+  ConcurrentCyclePhase next_state;
+
+  GCIdMark gc_id_mark;
+
+  GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
+  double cycle_start;
+
+  Ticks mark_start;
+  Ticks mark_end;
+  uint iter = 1;
+
+  while (!_cm->has_aborted() && cur_state != CycleDone) {
+    HandleMark hm;
+    ResourceMark rm;
+
+    switch (cur_state) {
+      case CycleStart: {
+        concurrent_cycle_start();
+        next_state = CLDClearClaimedMarks;
+        break;
+      }
+      case CLDClearClaimedMarks: {
         G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
         ClassLoaderDataGraph::clear_claimed_marks();
+        next_state = ScanRootRegions;
+        break;
       }
-
-      // We have to ensure that we finish scanning the root regions
-      // before the next GC takes place. To ensure this we have to
-      // make sure that we do not join the STS until the root regions
-      // have been scanned. If we did then it's possible that a
-      // subsequent GC could block us from joining the STS and proceed
-      // without the root regions have been scanned which would be a
-      // correctness issue.
-
-      {
+      case ScanRootRegions: {
         G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
         _cm->scan_root_regions();
-      }
-
-      // Note: ConcurrentGCBreakpoints before here risk deadlock,
-      // because a young GC must wait for root region scanning.
-
-      // It would be nice to use the G1ConcPhaseTimer class here but
-      // the "end" logging is inside the loop and not at the end of
-      // a scope. Also, the timer doesn't support nesting.
-      // Mimicking the same log output instead.
-      jlong mark_start = os::elapsed_counter();
-      log_info(gc, marking)("Concurrent Mark (%.3fs)",
-                            TimeHelper::counter_to_seconds(mark_start));
-      for (uint iter = 1; !_cm->has_aborted(); ++iter) {
-        // Concurrent marking.
-        {
-          ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
-          G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
-          _cm->mark_from_roots();
-        }
-        if (_cm->has_aborted()) {
-          break;
-        }
-        if (G1UseReferencePrecleaning) {
-          G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
-          _cm->preclean();
-        }
-        if (_cm->has_aborted()) {
-          break;
-        }
+        // Note: ConcurrentGCBreakpoints before here risk deadlock,
+        // because a young GC must wait for root region scanning.
 
-        // Delay remark pause for MMU.
+        // It would be nice to use the G1ConcPhaseTimer class here but
+        // the "end" logging is inside the loop and not at the end of
+        // a scope.
+        cycle_start = os::elapsedVTime();
+        mark_start = Ticks::now();
+        log_info(gc, marking)("Concurrent Mark (%.3fs)", mark_start.seconds());
+        next_state = MarkFromRoots;
+        break;
+      }
+      case MarkFromRoots: {
+        ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
+        G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
+        _cm->mark_from_roots();
+        next_state = G1UseReferencePrecleaning ? Preclean : DelayToKeepMMUBeforeRemark;
+        break;
+      }
+      case Preclean: {
+        G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
+        _cm->preclean();
+        next_state = DelayToKeepMMUBeforeRemark;
+        break;
+      }
+      case DelayToKeepMMUBeforeRemark: {
         double mark_end_time = os::elapsedVTime();
-        jlong mark_end = os::elapsed_counter();
+        mark_end = Ticks::now();
         _vtime_mark_accum += (mark_end_time - cycle_start);
         delay_to_keep_mmu(policy, true /* remark */);
-        if (_cm->has_aborted()) {
-          break;
-        }
-
+        next_state = PauseRemark;
+        break;
+      }
+      case PauseRemark: {
         // Pause Remark.
         ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
         log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
-                              TimeHelper::counter_to_seconds(mark_start),
-                              TimeHelper::counter_to_seconds(mark_end),
-                              TimeHelper::counter_to_millis(mark_end - mark_start));
+                              mark_start.seconds(),
+                              mark_end.seconds(),
+                              (mark_end - mark_start).seconds() * 1000.0);
         CMRemark cl(_cm);
         VM_G1Concurrent op(&cl, "Pause Remark");
         VMThread::execute(&op);
-        if (_cm->has_aborted()) {
-          break;
-        } else if (!_cm->restart_for_overflow()) {
-          break; // Exit loop if no restart requested.
-        } else {
-          // Loop to restart for overflow.
+        if (_cm->restart_for_overflow()) {
           log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
                                 iter);
+          iter++;
+          next_state = MarkFromRoots;
+        } else {
+          next_state = RebuildRememberedSets;
         }
+        break;
       }
-
-      if (!_cm->has_aborted()) {
+      case RebuildRememberedSets: {
         G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
         _cm->rebuild_rem_set_concurrently();
+        next_state = DelayToKeepMMUBeforeCleanup;
+        break;
       }
-
-      double end_time = os::elapsedVTime();
-      // Update the total virtual time before doing this, since it will try
-      // to measure it to get the vtime for this marking.
-      _vtime_accum = (end_time - _vtime_start);
-
-      if (!_cm->has_aborted()) {
+      case DelayToKeepMMUBeforeCleanup: {
+        double end_time = os::elapsedVTime();
+        // Update the total virtual time before doing this, since it will try
+        // to measure it to get the vtime for this marking.
+        _vtime_accum = (end_time - _vtime_start);
         delay_to_keep_mmu(policy, false /* cleanup */);
+        next_state = PauseCleanup;
+        break;
       }
-
-      if (!_cm->has_aborted()) {
+      case PauseCleanup: {
         CMCleanup cl_cl(_cm);
         VM_G1Concurrent op(&cl_cl, "Pause Cleanup");
         VMThread::execute(&op);
+        next_state = CleanupForNextMark;
+        break;
       }
-
-      // We now want to allow clearing of the marking bitmap to be
-      // suspended by a collection pause.
-      // We may have aborted just before the remark. Do not bother clearing the
-      // bitmap then, as it has been done during mark abort.
-      if (!_cm->has_aborted()) {
+      case CleanupForNextMark: {
         G1ConcPhaseTimer p(_cm, "Concurrent Cleanup for Next Mark");
         _cm->cleanup_for_next_mark();
+        next_state = CycleDone;
+        break;
+      }
+      default: {
+        guarantee(false, "Invalid state %u", cur_state);
+        break;
       }
     }
-
-    // Update the number of full collections that have been
-    // completed. This will also notify the G1OldGCCount_lock in case a
-    // Java thread is waiting for a full GC to happen (e.g., it
-    // called System.gc() with +ExplicitGCInvokesConcurrent).
-    {
-      SuspendibleThreadSetJoiner sts_join;
-      g1h->increment_old_marking_cycles_completed(true /* concurrent */,
-                                                  !_cm->has_aborted() /* liveness_completed */);
-
-      _cm->concurrent_cycle_end();
-      ConcurrentGCBreakpoints::notify_active_to_idle();
-    }
+    cur_state = next_state;
   }
-  _cm->root_regions()->cancel_scan();
-}
 
-void G1ConcurrentMarkThread::stop_service() {
-  MutexLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-  CGC_lock->notify_all();
+  concurrent_cycle_end();
 }
 
+void G1ConcurrentMarkThread::concurrent_cycle_start() {
+  _cm->concurrent_cycle_start();
+}
+
-void G1ConcurrentMarkThread::sleep_before_next_cycle() {
-  // We join here because we don't want to do the "shouldConcurrentMark()"
-  // below while the world is otherwise stopped.
-  assert(!in_progress(), "should have been cleared");
-
-  MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-  while (!started() && !should_terminate()) {
-    ml.wait();
-  }
+void G1ConcurrentMarkThread::concurrent_cycle_end() {
+  // Update the number of full collections that have been
+  // completed. This will also notify the G1OldGCCount_lock in case a
+  // Java thread is waiting for a full GC to happen (e.g., it
+  // called System.gc() with +ExplicitGCInvokesConcurrent).
+  SuspendibleThreadSetJoiner sts_join;
+  G1CollectedHeap::heap()->increment_old_marking_cycles_completed(true /* concurrent */,
+                                                                  !_cm->has_aborted() /* liveness_completed */);
 
-  if (started()) {
-    set_in_progress();
-  }
+  _cm->concurrent_cycle_end();
+  ConcurrentGCBreakpoints::notify_active_to_idle();
 }
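
For reference, below is a minimal standalone sketch (not HotSpot code; Phase, Cycle and overflow are made-up names for illustration only) of the phase-driven loop structure that the new run_cycle() adopts: each phase names its successor, and the loop exit condition is checked once per iteration instead of after every step, with a restart modelled as a transition back to an earlier phase.

#include <cstdio>

enum Phase { Start, Mark, Remark, Cleanup, Done };

struct Cycle {
  bool overflow = false;   // pretend the mark stack overflowed once

  void run() {
    Phase cur = Start;
    while (cur != Done) {            // the real loop also checks has_aborted()
      Phase next = Done;
      switch (cur) {
        case Start:
          std::puts("start");
          next = Mark;
          break;
        case Mark:
          std::puts("mark");
          next = Remark;
          break;
        case Remark:
          std::puts("remark");
          // A restart simply re-enters Mark, mirroring the
          // PauseRemark -> MarkFromRoots transition in the patch.
          next = overflow ? Mark : Cleanup;
          overflow = false;
          break;
        case Cleanup:
          std::puts("cleanup");
          next = Done;
          break;
        default:
          break;
      }
      cur = next;
    }
  }
};

int main() {
  Cycle c;
  c.overflow = true;   // force one Remark -> Mark restart
  c.run();
  return 0;
}

Compared with the previous chain of if (!_cm->has_aborted()) checks after every step, this shape keeps the abort handling in one place and makes every phase transition, including the overflow restart, explicit in a single switch.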