/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1VMOperations.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/debug.hpp"
#include "utilities/ticks.hpp"

// The G1 concurrent mark thread drives the concurrent part of a marking
// cycle: it alternates between waiting for a cycle to be started and
// executing the sequence of concurrent phases and (via VM operations) the
// Remark and Cleanup pauses.  The thread starts itself from the constructor.
G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _vtime_start(0.0),
  _vtime_accum(0.0),
  _cm(cm),
  _state(Idle)
{
  set_name("G1 Main Marker");
  create_and_start();
}

// Closure handed to VM_G1Concurrent so that the Remark pause work runs
// inside a safepoint on the VMThread.
class CMRemark : public VoidClosure {
  G1ConcurrentMark* _cm;
public:
  CMRemark(G1ConcurrentMark* cm) : _cm(cm) {}

  void do_void(){
    _cm->remark();
  }
};

// Closure handed to VM_G1Concurrent so that the Cleanup pause work runs
// inside a safepoint on the VMThread.
class CMCleanup : public VoidClosure {
  G1ConcurrentMark* _cm;
public:
  CMCleanup(G1ConcurrentMark* cm) : _cm(cm) {}

  void do_void(){
    _cm->cleanup();
  }
};

// Computes the absolute time (in elapsed seconds) until which this thread
// should delay the next Remark (remark == true) or Cleanup (remark == false)
// pause so that the Minimum Mutator Utilization (MMU) goal is not violated.
double G1ConcurrentMarkThread::mmu_delay_end(G1Policy* policy, bool remark) {
  // There are 3 reasons to use SuspendibleThreadSetJoiner.
  // 1. To avoid concurrency problem.
  //    - G1MMUTracker::add_pause(), when_sec() and its variation(when_ms() etc..) can be called
  //      concurrently from ConcurrentMarkThread and VMThread.
  // 2. If currently a gc is running, but it has not yet updated the MMU,
  //    we will not forget to consider that pause in the MMU calculation.
  // 3. If currently a gc is running, ConcurrentMarkThread will wait for it to finish.
  //    And then sleep for the amount of time predicted by delay_to_keep_mmu().
  SuspendibleThreadSetJoiner sts_join;

  const G1Analytics* analytics = policy->analytics();
  // Predict how long the upcoming pause will take, in seconds.
  double prediction_ms = remark ? analytics->predict_remark_time_ms()
                                : analytics->predict_cleanup_time_ms();
  double prediction = prediction_ms / MILLIUNITS;
  G1MMUTracker *mmu_tracker = policy->mmu_tracker();
  double now = os::elapsedTime();
  // when_sec() returns how long from 'now' a pause of the predicted length
  // may start without breaking the MMU goal.
  return now + mmu_tracker->when_sec(now, prediction);
}

// Sleeps (interruptibly) until the MMU tracker allows the next pause.
// Only applies when the young list length is sized adaptively; otherwise
// pauses are issued immediately.
void G1ConcurrentMarkThread::delay_to_keep_mmu(bool remark) {
  G1Policy* policy = G1CollectedHeap::heap()->policy();

  if (policy->use_adaptive_young_list_length()) {
    double delay_end_sec = mmu_delay_end(policy, remark);
    // Wait for timeout or thread termination request.
    MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag);
    while (!_cm->has_aborted() && !should_terminate()) {
      double sleep_time_sec = (delay_end_sec - os::elapsedTime());
      jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS);
      if (sleep_time_ms <= 0) {
        break; // Passed end time.
      } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) {
        break; // Timeout => reached end time.
      }
      // Other (possibly spurious) wakeup. Retry with updated sleep time.
    }
  }
}

// Scoped timer for a concurrent marking phase: logs start/end at Info level
// on the (gc, marking) tags and registers the phase with the concurrent mark
// GC timer for JFR/tracing.
class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
  G1ConcurrentMark* _cm;
  const char* _t;
public:
  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
    GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
    _cm(cm)
  {
    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
  }

  ~G1ConcPhaseTimer() {
    _cm->gc_timer_cm()->register_gc_concurrent_end();
  }
};

// Main loop: wait for a marking cycle to be requested, run the full
// concurrent cycle, repeat until asked to terminate.
void G1ConcurrentMarkThread::run_service() {
  _vtime_start = os::elapsedVTime();

  while (!should_terminate()) {
    if (wait_for_next_cycle()) {
      break; // Termination was requested while we were idle.
    }

    GCIdMark gc_id_mark;
    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");

    concurrent_cycle_start();
    full_concurrent_cycle_do();
    concurrent_cycle_end();

    // Accumulated virtual (CPU) time spent by this thread across cycles.
    _vtime_accum = (os::elapsedVTime() - _vtime_start);
  }
  // Unblock anyone waiting on root region scanning before we go away.
  _cm->root_regions()->cancel_scan();
}

// Wakes the thread out of any CGC_lock wait so it can observe the
// termination request.
void G1ConcurrentMarkThread::stop_service() {
  MutexLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
  CGC_lock->notify_all();
}

// Blocks until a cycle is started (state set to Started) or termination is
// requested. Returns true if the thread should terminate.
bool G1ConcurrentMarkThread::wait_for_next_cycle() {
  assert(!in_progress(), "should have been cleared");

  MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
  while (!started() && !should_terminate()) {
    ml.wait();
  }

  if (started()) {
    set_in_progress();
  }

  return should_terminate();
}

// Each phase_* method below runs one step of the concurrent cycle and
// returns _cm->has_aborted(), i.e. true when the cycle has been aborted and
// the caller should cut the rest of the cycle short.

bool G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() {
  G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
  ClassLoaderDataGraph::clear_claimed_marks();
  return _cm->has_aborted();
}

bool G1ConcurrentMarkThread::phase_scan_root_regions() {
  G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
  _cm->scan_root_regions();
  return _cm->has_aborted();
}

bool G1ConcurrentMarkThread::phase_mark_from_roots() {
  // Test/debug hook: lets a whitebox test stop the cycle right after
  // marking has started.
  ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
  G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
  _cm->mark_from_roots();
  return _cm->has_aborted();
}

bool G1ConcurrentMarkThread::phase_preclean() {
  G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
  _cm->preclean();
  return _cm->has_aborted();
}

bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_remark() {
  delay_to_keep_mmu(true /* remark */);
  return _cm->has_aborted();
}

// Executes the Remark pause as a VM operation. Sets has_overflown to
// whether the global mark stack overflowed during marking, in which case
// the caller restarts marking.
bool G1ConcurrentMarkThread::phase_remark(bool& has_overflown) {
  ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
  CMRemark cl(_cm);
  VM_G1Concurrent op(&cl, "Pause Remark");
  VMThread::execute(&op);
  has_overflown = _cm->has_overflown();
  return _cm->has_aborted();
}

bool G1ConcurrentMarkThread::phase_rebuild_remembered_sets() {
  G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
  _cm->rebuild_rem_set_concurrently();
  return _cm->has_aborted();
}

bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_cleanup() {
  delay_to_keep_mmu(false /* cleanup */);
  return _cm->has_aborted();
}

// Executes the Cleanup pause as a VM operation.
bool G1ConcurrentMarkThread::phase_cleanup() {
  CMCleanup cl(_cm);
  VM_G1Concurrent op(&cl, "Pause Cleanup");
  VMThread::execute(&op);
  return _cm->has_aborted();
}

bool G1ConcurrentMarkThread::phase_clear_bitmap_for_next_mark() {
  G1ConcPhaseTimer p(_cm, "Concurrent Cleanup for Next Mark");
  _cm->cleanup_for_next_mark();
  return _cm->has_aborted();
}

void G1ConcurrentMarkThread::concurrent_cycle_start() {
  _cm->concurrent_cycle_start();
}

// Runs the phases of one concurrent marking cycle in order. Most phases
// abort the remainder of the cycle (early return) when marking has been
// aborted; marking itself is retried while the mark stack overflows.
void G1ConcurrentMarkThread::full_concurrent_cycle_do() {
  HandleMark hm(Thread::current());
  ResourceMark rm;

  // Phase 1: Clear CLD claimed marks.
  phase_clear_cld_claimed_marks();

  // Do not return before the scan root regions phase as a GC waits for a
  // notification from it.

  // Phase 2: Scan root regions.
  if (phase_scan_root_regions()) return;

  Ticks mark_start = Ticks::now();
  log_info(gc, marking)("Concurrent Mark (%.3fs)", mark_start.seconds());

  bool needs_restart;
  uint iter = 1;
  do {
    // Phase 3: Mark From Roots.
    if (phase_mark_from_roots()) return;

    // Phase 4: Preclean (optional)
    if (G1UseReferencePrecleaning) {
      if (phase_preclean()) return;
    }

    // Phase 5: Wait for Remark.
    if (phase_delay_to_keep_mmu_before_remark()) return;

    // Phase 6: Remark pause
    if (phase_remark(needs_restart)) return;
    if (needs_restart) {
      log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
                            iter++);
    }
  } while (needs_restart);

  Ticks mark_end = Ticks::now();
  log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
                        mark_start.seconds(), mark_end.seconds(),
                        (mark_end - mark_start).seconds() * 1000.0);

  // Phase 7: Rebuild remembered sets.
  if (phase_rebuild_remembered_sets()) return;

  // Phase 8: Wait for Cleanup.
  if (phase_delay_to_keep_mmu_before_cleanup()) return;

  // Phase 9: Cleanup pause
  if (phase_cleanup()) return;

  // Phase 10: Clear bitmap for next mark.
  phase_clear_bitmap_for_next_mark();
}

void G1ConcurrentMarkThread::concurrent_cycle_end() {
  // Update the number of full collections that have been
  // completed. This will also notify the G1OldGCCount_lock in case a
  // Java thread is waiting for a full GC to happen (e.g., it
  // called System.gc() with +ExplicitGCInvokesConcurrent).
  SuspendibleThreadSetJoiner sts_join;
  G1CollectedHeap::heap()->increment_old_marking_cycles_completed(true /* concurrent */,
                                                                 !_cm->has_aborted());

  _cm->concurrent_cycle_end();
  ConcurrentGCBreakpoints::notify_active_to_idle();
}