/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/classLoaderDataGraph.hpp" 27 #include "gc/g1/g1Analytics.hpp" 28 #include "gc/g1/g1CollectedHeap.inline.hpp" 29 #include "gc/g1/g1ConcurrentMark.inline.hpp" 30 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" 31 #include "gc/g1/g1MMUTracker.hpp" 32 #include "gc/g1/g1Policy.hpp" 33 #include "gc/g1/g1RemSet.hpp" 34 #include "gc/g1/g1Trace.hpp" 35 #include "gc/g1/g1VMOperations.hpp" 36 #include "gc/shared/concurrentGCBreakpoints.hpp" 37 #include "gc/shared/gcId.hpp" 38 #include "gc/shared/gcTraceTime.inline.hpp" 39 #include "gc/shared/suspendibleThreadSet.hpp" 40 #include "logging/log.hpp" 41 #include "memory/resourceArea.hpp" 42 #include "runtime/handles.inline.hpp" 43 #include "runtime/vmThread.hpp" 44 #include "utilities/debug.hpp" 45 #include "utilities/ticks.hpp" 46 47 G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) : 48 ConcurrentGCThread(), 49 _vtime_start(0.0), 50 _vtime_accum(0.0), 51 _cm(cm), 52 _state(Idle) 53 { 54 set_name("G1 Main Marker"); 55 create_and_start(); 56 } 57 58 class CMRemark : public VoidClosure { 59 G1ConcurrentMark* _cm; 60 public: 61 CMRemark(G1ConcurrentMark* cm) : _cm(cm) {} 62 63 void do_void(){ 64 _cm->remark(); 65 } 66 }; 67 68 class CMCleanup : public VoidClosure { 69 G1ConcurrentMark* _cm; 70 public: 71 CMCleanup(G1ConcurrentMark* cm) : _cm(cm) {} 72 73 void do_void(){ 74 _cm->cleanup(); 75 } 76 }; 77 78 double G1ConcurrentMarkThread::mmu_delay_end(G1Policy* policy, bool remark) { 79 // There are 3 reasons to use SuspendibleThreadSetJoiner. 80 // 1. To avoid concurrency problem. 81 // - G1MMUTracker::add_pause(), when_sec() and its variation(when_ms() etc..) can be called 82 // concurrently from ConcurrentMarkThread and VMThread. 83 // 2. If currently a gc is running, but it has not yet updated the MMU, 84 // we will not forget to consider that pause in the MMU calculation. 85 // 3. 
If currently a gc is running, ConcurrentMarkThread will wait it to be finished. 86 // And then sleep for predicted amount of time by delay_to_keep_mmu(). 87 SuspendibleThreadSetJoiner sts_join; 88 89 const G1Analytics* analytics = policy->analytics(); 90 double prediction_ms = remark ? analytics->predict_remark_time_ms() 91 : analytics->predict_cleanup_time_ms(); 92 double prediction = prediction_ms / MILLIUNITS; 93 G1MMUTracker *mmu_tracker = policy->mmu_tracker(); 94 double now = os::elapsedTime(); 95 return now + mmu_tracker->when_sec(now, prediction); 96 } 97 98 void G1ConcurrentMarkThread::delay_to_keep_mmu(bool remark) { 99 G1Policy* policy = G1CollectedHeap::heap()->policy(); 100 101 if (policy->use_adaptive_young_list_length()) { 102 double delay_end_sec = mmu_delay_end(policy, remark); 103 // Wait for timeout or thread termination request. 104 MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag); 105 while (!_cm->has_aborted() && !should_terminate()) { 106 double sleep_time_sec = (delay_end_sec - os::elapsedTime()); 107 jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS); 108 if (sleep_time_ms <= 0) { 109 break; // Passed end time. 110 } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) { 111 break; // Timeout => reached end time. 112 } 113 // Other (possibly spurious) wakeup. Retry with updated sleep time. 
114 } 115 } 116 } 117 118 class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> { 119 G1ConcurrentMark* _cm; 120 121 public: 122 G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : 123 GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title), 124 _cm(cm) 125 { 126 _cm->gc_timer_cm()->register_gc_concurrent_start(title); 127 } 128 129 ~G1ConcPhaseTimer() { 130 _cm->gc_timer_cm()->register_gc_concurrent_end(); 131 } 132 }; 133 134 void G1ConcurrentMarkThread::run_service() { 135 _vtime_start = os::elapsedVTime(); 136 137 while (!should_terminate()) { 138 if (wait_for_next_cycle()) { 139 break; 140 } 141 142 GCIdMark gc_id_mark; 143 GCTraceConcTime(Info, gc) tt("Concurrent Cycle"); 144 145 concurrent_cycle_start(); 146 full_concurrent_cycle_do(); 147 concurrent_cycle_end(); 148 149 _vtime_accum = (os::elapsedVTime() - _vtime_start); 150 } 151 _cm->root_regions()->cancel_scan(); 152 } 153 154 void G1ConcurrentMarkThread::stop_service() { 155 MutexLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag); 156 CGC_lock->notify_all(); 157 } 158 159 bool G1ConcurrentMarkThread::wait_for_next_cycle() { 160 assert(!in_progress(), "should have been cleared"); 161 162 MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag); 163 while (!started() && !should_terminate()) { 164 ml.wait(); 165 } 166 167 if (started()) { 168 set_in_progress(); 169 } 170 171 return should_terminate(); 172 } 173 174 bool G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() { 175 G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks"); 176 ClassLoaderDataGraph::clear_claimed_marks(); 177 return _cm->has_aborted(); 178 } 179 180 bool G1ConcurrentMarkThread::phase_scan_root_regions() { 181 G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions"); 182 _cm->scan_root_regions(); 183 return _cm->has_aborted(); 184 } 185 186 bool G1ConcurrentMarkThread::phase_mark_loop() { 187 Ticks mark_start = Ticks::now(); 188 log_info(gc, 
marking)("Concurrent Mark (%.3fs)", mark_start.seconds()); 189 190 uint iter = 1; 191 while (true) { 192 // Subphase 1: Mark From Roots. 193 if (subphase_mark_from_roots()) return true; 194 195 // Subphase 2: Preclean (optional) 196 if (G1UseReferencePrecleaning) { 197 if (subphase_preclean()) return true; 198 } 199 200 // Subphase 3: Wait for Remark. 201 if (subphase_delay_to_keep_mmu_before_remark()) return true; 202 203 // Subphase 4: Remark pause 204 if (subphase_remark()) return true; 205 206 // Check if we need to restart the marking loop. 207 if (!mark_loop_needs_restart()) break; 208 209 log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)", 210 iter++); 211 } 212 213 Ticks mark_end = Ticks::now(); 214 log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms", 215 mark_start.seconds(), mark_end.seconds(), 216 (mark_end - mark_start).seconds() * 1000.0); 217 218 return false; 219 } 220 221 bool G1ConcurrentMarkThread::mark_loop_needs_restart() const { 222 return _cm->has_overflown(); 223 } 224 225 bool G1ConcurrentMarkThread::subphase_mark_from_roots() { 226 ConcurrentGCBreakpoints::at("AFTER MARKING STARTED"); 227 G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots"); 228 _cm->mark_from_roots(); 229 return _cm->has_aborted(); 230 } 231 232 bool G1ConcurrentMarkThread::subphase_preclean() { 233 G1ConcPhaseTimer p(_cm, "Concurrent Preclean"); 234 _cm->preclean(); 235 return _cm->has_aborted(); 236 } 237 238 bool G1ConcurrentMarkThread::subphase_delay_to_keep_mmu_before_remark() { 239 delay_to_keep_mmu(true /* remark */); 240 return _cm->has_aborted(); 241 } 242 243 bool G1ConcurrentMarkThread::subphase_remark() { 244 ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED"); 245 CMRemark cl(_cm); 246 VM_G1Concurrent op(&cl, "Pause Remark"); 247 VMThread::execute(&op); 248 return _cm->has_aborted(); 249 } 250 251 bool G1ConcurrentMarkThread::phase_rebuild_remembered_sets() { 252 G1ConcPhaseTimer p(_cm, "Concurrent Rebuild 
Remembered Sets"); 253 _cm->rebuild_rem_set_concurrently(); 254 return _cm->has_aborted(); 255 } 256 257 bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_cleanup() { 258 delay_to_keep_mmu(false /* cleanup */); 259 return _cm->has_aborted(); 260 } 261 262 bool G1ConcurrentMarkThread::phase_cleanup() { 263 CMCleanup cl(_cm); 264 VM_G1Concurrent op(&cl, "Pause Cleanup"); 265 VMThread::execute(&op); 266 return _cm->has_aborted(); 267 } 268 269 bool G1ConcurrentMarkThread::phase_clear_bitmap_for_next_mark() { 270 G1ConcPhaseTimer p(_cm, "Concurrent Cleanup for Next Mark"); 271 _cm->cleanup_for_next_mark(); 272 return _cm->has_aborted(); 273 } 274 275 void G1ConcurrentMarkThread::concurrent_cycle_start() { 276 _cm->concurrent_cycle_start(); 277 } 278 279 void G1ConcurrentMarkThread::full_concurrent_cycle_do() { 280 HandleMark hm(Thread::current()); 281 ResourceMark rm; 282 283 // Phase 1: Clear CLD claimed marks. 284 phase_clear_cld_claimed_marks(); 285 286 // Do not return before the scan root regions phase as a GC waits for a 287 // notification from it. 288 289 // Phase 2: Scan root regions. 290 if (phase_scan_root_regions()) return; 291 292 // Phase 3: Actual mark loop. 293 if (phase_mark_loop()) return; 294 295 // Phase 4: Rebuild remembered sets. 296 if (phase_rebuild_remembered_sets()) return; 297 298 // Phase 5: Wait for Cleanup. 299 if (phase_delay_to_keep_mmu_before_cleanup()) return; 300 301 // Phase 6: Cleanup pause 302 if (phase_cleanup()) return; 303 304 // Phase 7: Clear bitmap for next mark. 305 phase_clear_bitmap_for_next_mark(); 306 } 307 308 void G1ConcurrentMarkThread::concurrent_cycle_end() { 309 // Update the number of full collections that have been 310 // completed. This will also notify the G1OldGCCount_lock in case a 311 // Java thread is waiting for a full GC to happen (e.g., it 312 // called System.gc() with +ExplicitGCInvokesConcurrent). 
313 SuspendibleThreadSetJoiner sts_join; 314 G1CollectedHeap::heap()->increment_old_marking_cycles_completed(true /* concurrent */, 315 !_cm->has_aborted()); 316 317 _cm->concurrent_cycle_end(); 318 ConcurrentGCBreakpoints::notify_active_to_idle(); 319 }