
src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp

rev 60794 : imported patch 8247928-sjohanss-review

*** 40,57 ****
  #include "logging/log.hpp"
  #include "memory/resourceArea.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/vmThread.hpp"
  #include "utilities/debug.hpp"
! 
! // ======= Concurrent Mark Thread ========
  
  G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
    ConcurrentGCThread(),
    _vtime_start(0.0),
    _vtime_accum(0.0),
-   _vtime_mark_accum(0.0),
    _cm(cm),
    _state(Idle) {
    set_name("G1 Main Marker");
    create_and_start();
--- 40,55 ----
  #include "logging/log.hpp"
  #include "memory/resourceArea.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/vmThread.hpp"
  #include "utilities/debug.hpp"
! #include "utilities/ticks.hpp"
  
  G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
    ConcurrentGCThread(),
    _vtime_start(0.0),
    _vtime_accum(0.0),
    _cm(cm),
    _state(Idle) {
    set_name("G1 Main Marker");
    create_and_start();
*** 75,118 ****
  void do_void(){
    _cm->cleanup();
  }
  };
  
! double G1ConcurrentMarkThread::mmu_delay_end(G1Policy* g1_policy, bool remark) {
    // There are 3 reasons to use SuspendibleThreadSetJoiner.
    // 1. To avoid concurrency problem.
    //    - G1MMUTracker::add_pause(), when_sec() and its variation(when_ms() etc..) can be called
    //      concurrently from ConcurrentMarkThread and VMThread.
    // 2. If currently a gc is running, but it has not yet updated the MMU,
    //    we will not forget to consider that pause in the MMU calculation.
    // 3. If currently a gc is running, ConcurrentMarkThread will wait it to be finished.
    //    And then sleep for predicted amount of time by delay_to_keep_mmu().
    SuspendibleThreadSetJoiner sts_join;
  
!   const G1Analytics* analytics = g1_policy->analytics();
    double prediction_ms = remark ? analytics->predict_remark_time_ms()
                                  : analytics->predict_cleanup_time_ms();
    double prediction = prediction_ms / MILLIUNITS;
!   G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
    double now = os::elapsedTime();
    return now + mmu_tracker->when_sec(now, prediction);
  }
  
! void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
!   if (g1_policy->use_adaptive_young_list_length()) {
!     double delay_end_sec = mmu_delay_end(g1_policy, remark);
      // Wait for timeout or thread termination request.
      MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag);
!     while (!_cm->has_aborted()) {
        double sleep_time_sec = (delay_end_sec - os::elapsedTime());
        jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS);
        if (sleep_time_ms <= 0) {
          break;                  // Passed end time.
        } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) {
          break;                  // Timeout => reached end time.
-       } else if (should_terminate()) {
-         break;                  // Wakeup for pending termination request.
        }
        // Other (possibly spurious) wakeup.  Retry with updated sleep time.
      }
    }
  }
--- 73,116 ----
  void do_void(){
    _cm->cleanup();
  }
  };
  
! double G1ConcurrentMarkThread::mmu_delay_end(G1Policy* policy, bool remark) {
    // There are 3 reasons to use SuspendibleThreadSetJoiner.
    // 1. To avoid concurrency problem.
    //    - G1MMUTracker::add_pause(), when_sec() and its variation(when_ms() etc..) can be called
    //      concurrently from ConcurrentMarkThread and VMThread.
    // 2. If currently a gc is running, but it has not yet updated the MMU,
    //    we will not forget to consider that pause in the MMU calculation.
    // 3. If currently a gc is running, ConcurrentMarkThread will wait it to be finished.
    //    And then sleep for predicted amount of time by delay_to_keep_mmu().
    SuspendibleThreadSetJoiner sts_join;
  
!   const G1Analytics* analytics = policy->analytics();
    double prediction_ms = remark ? analytics->predict_remark_time_ms()
                                  : analytics->predict_cleanup_time_ms();
    double prediction = prediction_ms / MILLIUNITS;
!   G1MMUTracker *mmu_tracker = policy->mmu_tracker();
    double now = os::elapsedTime();
    return now + mmu_tracker->when_sec(now, prediction);
  }
  
! void G1ConcurrentMarkThread::delay_to_keep_mmu(bool remark) {
!   G1Policy* policy = G1CollectedHeap::heap()->policy();
! 
!   if (policy->use_adaptive_young_list_length()) {
!     double delay_end_sec = mmu_delay_end(policy, remark);
      // Wait for timeout or thread termination request.
      MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag);
!     while (!_cm->has_aborted() && !should_terminate()) {
        double sleep_time_sec = (delay_end_sec - os::elapsedTime());
        jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS);
        if (sleep_time_ms <= 0) {
          break;                  // Passed end time.
        } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) {
          break;                  // Timeout => reached end time.
        }
        // Other (possibly spurious) wakeup.  Retry with updated sleep time.
      }
    }
  }
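Reviewer note: the MMU-delay logic above may be easier to follow with a concrete model in mind. G1 tracks recent pauses and, given a predicted Remark/Cleanup duration, computes how long the marking thread must wait so that the upcoming pause still fits the MMU goal. The stand-alone C++ sketch below illustrates that idea only; ToyMMUTracker, its brute-force search, and the example numbers are assumptions made for this illustration and are not the actual G1MMUTracker implementation.

  #include <algorithm>
  #include <cstdio>
  #include <utility>
  #include <vector>

  // Toy model of an MMU goal: at most 'max_gc_time' seconds of GC within any
  // window of 'time_slice' seconds.
  struct ToyMMUTracker {
    double time_slice;
    double max_gc_time;
    std::vector<std::pair<double, double>> pauses;   // (start, end) of past pauses

    // GC time inside the window that ends at 'window_end'.
    double gc_time_in_window(double window_end) const {
      double window_start = window_end - time_slice;
      double sum = 0.0;
      for (const auto& p : pauses) {
        double overlap = std::min(p.second, window_end) - std::max(p.first, window_start);
        if (overlap > 0.0) {
          sum += overlap;
        }
      }
      return sum;
    }

    // How long to wait (from 'now') before a pause of 'pause_sec' can start
    // without exceeding the budget of the window ending when that pause ends.
    double when_sec(double now, double pause_sec) const {
      if (pause_sec >= max_gc_time) {
        return 0.0;               // goal cannot be met anyway; do not delay in this sketch
      }
      double delay = 0.0;
      const double step = 0.001;  // 1 ms granularity, illustration only
      while (gc_time_in_window(now + delay + pause_sec) + pause_sec > max_gc_time) {
        delay += step;
      }
      return delay;
    }
  };

  int main() {
    // Goal: at most 0.2 s of GC in any 1.0 s window; one recent 100 ms pause.
    ToyMMUTracker tracker{1.0, 0.2, {{9.85, 9.95}}};
    double predicted_remark_sec = 0.15;
    double now = 10.0;
    // Analogue of mmu_delay_end(): earliest absolute time at which Remark may start.
    double delay_end = now + tracker.when_sec(now, predicted_remark_sec);
    std::printf("delay Remark until t=%.3f s\n", delay_end);   // ~10.750 s for these numbers
    return 0;
  }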
*** 134,300 ****
  };
  
  void G1ConcurrentMarkThread::run_service() {
    _vtime_start = os::elapsedVTime();
  
-   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-   G1Policy* policy = g1h->policy();
- 
    while (!should_terminate()) {
!     // wait until started is set.
!     sleep_before_next_cycle();
!     if (should_terminate()) {
        break;
      }
  
      GCIdMark gc_id_mark;
  
!     _cm->concurrent_cycle_start();
! 
!     GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
!     {
!       ResourceMark rm;
! 
!       double cycle_start = os::elapsedVTime();
! 
!       {
!         G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
!         ClassLoaderDataGraph::clear_claimed_marks();
        }
  
!       // We have to ensure that we finish scanning the root regions
!       // before the next GC takes place. To ensure this we have to
!       // make sure that we do not join the STS until the root regions
!       // have been scanned. If we did then it's possible that a
!       // subsequent GC could block us from joining the STS and proceed
!       // without the root regions have been scanned which would be a
!       // correctness issue.
!       {
          G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
          _cm->scan_root_regions();
        }
  
!       // Note: ConcurrentGCBreakpoints before here risk deadlock,
!       // because a young GC must wait for root region scanning.
! 
!       // It would be nice to use the G1ConcPhaseTimer class here but
!       // the "end" logging is inside the loop and not at the end of
!       // a scope. Also, the timer doesn't support nesting.
!       // Mimicking the same log output instead.
!       jlong mark_start = os::elapsed_counter();
!       log_info(gc, marking)("Concurrent Mark (%.3fs)",
!                             TimeHelper::counter_to_seconds(mark_start));
!       for (uint iter = 1; !_cm->has_aborted(); ++iter) {
!         // Concurrent marking.
!         {
            ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
            G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
            _cm->mark_from_roots();
!         }
!         if (_cm->has_aborted()) {
!           break;
!         }
! 
!         if (G1UseReferencePrecleaning) {
            G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
            _cm->preclean();
!         }
!         if (_cm->has_aborted()) {
!           break;
!         }
! 
!         // Delay remark pause for MMU.
!         double mark_end_time = os::elapsedVTime();
!         jlong mark_end = os::elapsed_counter();
!         _vtime_mark_accum += (mark_end_time - cycle_start);
!         delay_to_keep_mmu(policy, true /* remark */);
!         if (_cm->has_aborted()) {
!           break;
!         }
! 
!         // Pause Remark.
          ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
-         log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
-                               TimeHelper::counter_to_seconds(mark_start),
-                               TimeHelper::counter_to_seconds(mark_end),
-                               TimeHelper::counter_to_millis(mark_end - mark_start));
          CMRemark cl(_cm);
          VM_G1Concurrent op(&cl, "Pause Remark");
          VMThread::execute(&op);
!         if (_cm->has_aborted()) {
!           break;
!         } else if (!_cm->restart_for_overflow()) {
!           break;              // Exit loop if no restart requested.
!         } else {
!           // Loop to restart for overflow.
!           log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
!                                 iter);
!         }
!       }
! 
!       if (!_cm->has_aborted()) {
          G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
          _cm->rebuild_rem_set_concurrently();
!       }
! 
!       double end_time = os::elapsedVTime();
!       // Update the total virtual time before doing this, since it will try
!       // to measure it to get the vtime for this marking.
!       _vtime_accum = (end_time - _vtime_start);
! 
!       if (!_cm->has_aborted()) {
!         delay_to_keep_mmu(policy, false /* cleanup */);
!       }
! 
!       if (!_cm->has_aborted()) {
!         CMCleanup cl_cl(_cm);
!         VM_G1Concurrent op(&cl_cl, "Pause Cleanup");
          VMThread::execute(&op);
!       }
! 
!       // We now want to allow clearing of the marking bitmap to be
!       // suspended by a collection pause.
!       // We may have aborted just before the remark. Do not bother clearing the
!       // bitmap then, as it has been done during mark abort.
!       if (!_cm->has_aborted()) {
          G1ConcPhaseTimer p(_cm, "Concurrent Cleanup for Next Mark");
          _cm->cleanup_for_next_mark();
!       }
!     }
  
      // Update the number of full collections that have been
      // completed. This will also notify the G1OldGCCount_lock in case a
      // Java thread is waiting for a full GC to happen (e.g., it
      // called System.gc() with +ExplicitGCInvokesConcurrent).
-     {
        SuspendibleThreadSetJoiner sts_join;
!       g1h->increment_old_marking_cycles_completed(true /* concurrent */,
!                                                   !_cm->has_aborted() /* liveness_completed */);
  
        _cm->concurrent_cycle_end();
        ConcurrentGCBreakpoints::notify_active_to_idle();
-     }
-   }
-   _cm->root_regions()->cancel_scan();
- }
- 
- void G1ConcurrentMarkThread::stop_service() {
-   MutexLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-   CGC_lock->notify_all();
- }
- 
- 
- void G1ConcurrentMarkThread::sleep_before_next_cycle() {
-   // We join here because we don't want to do the "shouldConcurrentMark()"
-   // below while the world is otherwise stopped.
-   assert(!in_progress(), "should have been cleared");
- 
-   MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-   while (!started() && !should_terminate()) {
-     ml.wait();
-   }
- 
-   if (started()) {
-     set_in_progress();
-   }
  }
--- 132,319 ----
  };
  
  void G1ConcurrentMarkThread::run_service() {
    _vtime_start = os::elapsedVTime();
  
    while (!should_terminate()) {
!     if (wait_for_next_cycle()) {
        break;
      }
  
      GCIdMark gc_id_mark;
+     GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
  
!     concurrent_cycle_start();
!     full_concurrent_cycle_do();
!     concurrent_cycle_end();
! 
!     _vtime_accum = (os::elapsedVTime() - _vtime_start);
!   }
!   _cm->root_regions()->cancel_scan();
! }
! 
! void G1ConcurrentMarkThread::stop_service() {
!   MutexLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
!   CGC_lock->notify_all();
! }
! 
! bool G1ConcurrentMarkThread::wait_for_next_cycle() {
!   assert(!in_progress(), "should have been cleared");
! 
!   MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
!   while (!started() && !should_terminate()) {
!     ml.wait();
    }
  
!   if (started()) {
!     set_in_progress();
!   }
! 
!   return should_terminate();
! }
! 
! bool G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() {
!   G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
!   ClassLoaderDataGraph::clear_claimed_marks();
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::phase_scan_root_regions() {
    G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
    _cm->scan_root_regions();
+   return _cm->has_aborted();
+ }
+ 
+ bool G1ConcurrentMarkThread::phase_mark_loop() {
+   Ticks mark_start = Ticks::now();
+   log_info(gc, marking)("Concurrent Mark (%.3fs)", mark_start.seconds());
+ 
+   uint iter = 1;
+   while (true) {
+     // Subphase 1: Mark From Roots.
+     if (subphase_mark_from_roots()) return true;
+ 
+     // Subphase 2: Preclean (optional)
+     if (G1UseReferencePrecleaning) {
+       if (subphase_preclean()) return true;
      }
  
!     // Subphase 3: Wait for Remark.
!     if (subphase_delay_to_keep_mmu_before_remark()) return true;
! 
!     // Subphase 4: Remark pause
!     if (subphase_remark()) return true;
! 
!     // Check if we need to restart the marking loop.
!     if (!mark_loop_needs_restart()) break;
! 
!     log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
!                           iter++);
!   }
! 
!   Ticks mark_end = Ticks::now();
!   log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
!                         mark_start.seconds(), mark_end.seconds(),
!                         (mark_end - mark_start).seconds() * 1000.0);
! 
!   return false;
! }
! 
! bool G1ConcurrentMarkThread::mark_loop_needs_restart() const {
!   return _cm->has_overflown();
! }
! 
! bool G1ConcurrentMarkThread::subphase_mark_from_roots() {
    ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
    G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
    _cm->mark_from_roots();
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::subphase_preclean() {
    G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
    _cm->preclean();
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::subphase_delay_to_keep_mmu_before_remark() {
!   delay_to_keep_mmu(true /* remark */);
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::subphase_remark() {
    ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
    CMRemark cl(_cm);
    VM_G1Concurrent op(&cl, "Pause Remark");
    VMThread::execute(&op);
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::phase_rebuild_remembered_sets() {
    G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
    _cm->rebuild_rem_set_concurrently();
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_cleanup() {
!   delay_to_keep_mmu(false /* cleanup */);
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::phase_cleanup() {
!   CMCleanup cl(_cm);
!   VM_G1Concurrent op(&cl, "Pause Cleanup");
    VMThread::execute(&op);
!   return _cm->has_aborted();
! }
! 
! bool G1ConcurrentMarkThread::phase_clear_bitmap_for_next_mark() {
    G1ConcPhaseTimer p(_cm, "Concurrent Cleanup for Next Mark");
    _cm->cleanup_for_next_mark();
!   return _cm->has_aborted();
! }
! 
! void G1ConcurrentMarkThread::concurrent_cycle_start() {
!   _cm->concurrent_cycle_start();
! }
! 
! void G1ConcurrentMarkThread::full_concurrent_cycle_do() {
!   HandleMark hm(Thread::current());
!   ResourceMark rm;
! 
!   // Phase 1: Clear CLD claimed marks.
!   phase_clear_cld_claimed_marks();
! 
!   // Do not return before the scan root regions phase as a GC waits for a
!   // notification from it.
! 
!   // Phase 2: Scan root regions.
!   if (phase_scan_root_regions()) return;
+ 
+   // Phase 3: Actual mark loop.
+   if (phase_mark_loop()) return;
+ 
+   // Phase 4: Rebuild remembered sets.
+   if (phase_rebuild_remembered_sets()) return;
+ 
+   // Phase 5: Wait for Cleanup.
+   if (phase_delay_to_keep_mmu_before_cleanup()) return;
+ 
+   // Phase 6: Cleanup pause
+   if (phase_cleanup()) return;
+ 
+   // Phase 7: Clear bitmap for next mark.
+   phase_clear_bitmap_for_next_mark();
+ }
+ 
+ void G1ConcurrentMarkThread::concurrent_cycle_end() {
    // Update the number of full collections that have been
    // completed. This will also notify the G1OldGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    SuspendibleThreadSetJoiner sts_join;
!   G1CollectedHeap::heap()->increment_old_marking_cycles_completed(true /* concurrent */,
!                                                                   !_cm->has_aborted());
  
    _cm->concurrent_cycle_end();
    ConcurrentGCBreakpoints::notify_active_to_idle();
  }
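Reviewer note: the refactoring above turns run_service()'s single large loop into small phase functions that each report whether the concurrent cycle has been aborted, and full_concurrent_cycle_do() bails out at the first phase that returns true. The stand-alone sketch below shows that control-flow pattern in isolation; it is an illustration only, and the class and phase names are made up rather than taken from HotSpot.

  #include <cstdio>

  // Toy driver mirroring the shape of full_concurrent_cycle_do(): each phase
  // returns "should we stop the cycle?", the analogue of _cm->has_aborted().
  class ToyCycle {
    bool _aborted = false;

    bool phase_scan_roots() {
      std::puts("scan roots");
      return _aborted;
    }

    bool phase_mark() {
      std::puts("mark");
      _aborted = true;           // pretend the cycle was aborted during marking
      return _aborted;
    }

    bool phase_cleanup() {
      std::puts("cleanup");
      return _aborted;
    }

  public:
    void run_cycle() {
      // Early-return chain: later phases are skipped once a phase observes the
      // abort, just like the phase_*() calls in full_concurrent_cycle_do().
      if (phase_scan_roots()) return;
      if (phase_mark()) return;
      phase_cleanup();           // last phase: nothing left to skip
    }
  };

  int main() {
    ToyCycle cycle;
    cycle.run_cycle();           // prints "scan roots" and "mark"; cleanup is skipped
    return 0;
  }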