  CMCleanUp(G1ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->cleanup();
  }
};

// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
  if (g1_policy->adaptive_young_list_length()) {
    double now = os::elapsedTime();
    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
                                  : g1_policy->predict_cleanup_time_ms();
    G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
    os::sleep(this, sleep_time_ms, false);
  }
}

class GCConcPhaseTimer : StackObj {
  G1ConcurrentMark* _cm;

 public:
  GCConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : _cm(cm) {
    _cm->register_concurrent_phase_start(title);
  }

  ~GCConcPhaseTimer() {
    _cm->register_concurrent_phase_end();
  }
};
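
// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of this file: delay_to_keep_mmu() above
// asks the G1MMUTracker how long to sleep so that a pause of the predicted
// length still meets the MMU goal, i.e. so that total pause time within a
// time slice (e.g. GCPauseIntervalMillis) stays below the allowed pause time
// (e.g. MaxGCPauseMillis). The brute-force helper below models that query for
// a small pause history; every name here and the 1 ms search step are
// assumptions for illustration, not the actual G1MMUTracker implementation,
// and only the time slice ending when the delayed pause would end is checked.
static double example_when_ms(const double* pause_start_ms, // start time of each past pause
                              const double* pause_len_ms,   // length of each past pause
                              int           num_pauses,
                              double        now_ms,         // current time
                              double        predicted_ms,   // predicted pause length (assumed <= max_gc_ms)
                              double        slice_ms,       // MMU time slice
                              double        max_gc_ms) {    // allowed pause time per slice
  for (double wait_ms = 0.0; ; wait_ms += 1.0) {
    double end   = now_ms + wait_ms + predicted_ms;  // when the delayed pause would end
    double start = end - slice_ms;                   // start of the slice ending there
    double gc_ms = predicted_ms;                     // the predicted pause itself
    for (int i = 0; i < num_pauses; i++) {
      // Add the part of each past pause that falls inside [start, end].
      double lo        = pause_start_ms[i] > start ? pause_start_ms[i] : start;
      double pause_end = pause_start_ms[i] + pause_len_ms[i];
      double hi        = pause_end < end ? pause_end : end;
      if (hi > lo) {
        gc_ms += hi - lo;
      }
    }
    if (gc_ms <= max_gc_ms) {
      return wait_ms;  // earliest delay at which the pause fits the MMU goal
    }
  }
}
// --------------------------- end of sketch ---------------------------------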

void ConcurrentMarkThread::run_service() {
  _vtime_start = os::elapsedVTime();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();

  while (!should_terminate()) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (should_terminate()) {
      _cm->root_regions()->cancel_scan();
      break;
    }

    assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
    {
      ResourceMark rm;
      HandleMark hm;
      double cycle_start = os::elapsedVTime();

      {
        GCConcPhaseTimer(_cm, "Concurrent Clearing of Claimed Marks");
        ClassLoaderDataGraph::clear_claimed_marks();
      }

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions being scanned which would be a
      // correctness issue.

      {
        GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
        _cm->scanRootRegions();
      }

      // It would be nice to use the GCTraceConcTime class here but
      // the "end" logging is inside the loop and not at the end of
      // a scope. Mimicking the same log output as GCTraceConcTime instead.
      jlong mark_start = os::elapsed_counter();
      log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));

      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          GCConcPhaseTimer(_cm, "Concurrent Mark");
          _cm->markFromRoots();
        }

        double mark_end_time = os::elapsedVTime();
        jlong mark_end = os::elapsed_counter();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
          delay_to_keep_mmu(g1_policy, true /* remark */);
          log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
                       TimeHelper::counter_to_seconds(mark_start),
                       TimeHelper::counter_to_seconds(mark_end),
                       TimeHelper::counter_to_millis(mark_end - mark_start));

          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
          log_info(gc)("Concurrent Mark restart for overflow");
        }
      } while (cm()->restart_for_overflow());
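
      // Note added for clarity (not part of the original source): the do/while
      // loop above re-runs the whole concurrent marking step whenever the Remark
      // pause found that the global mark stack overflowed. Each iteration is
      // roughly:
      //
      //   _cm->markFromRoots();            // concurrent marking; may overflow the mark stack
      //   delay_to_keep_mmu(..., true);    // delay so the Remark pause fits the MMU goal
      //   VMThread::execute(&op);          // "Pause Remark"; may set restart_for_overflow()
      //
      // and the loop condition keeps iterating until Remark completes without an
      // overflow.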

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking. We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        delay_to_keep_mmu(g1_policy, false /* cleanup */);

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts_join;
        g1h->collector_state()->set_mark_in_progress(false);
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
        GCConcPhaseTimer(_cm, "Concurrent Cleanup");

        // Now do the concurrent cleanup operation.
        _cm->completeCleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag) otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race before recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      {
        SuspendibleThreadSetJoiner sts_join;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        } else {
          log_info(gc)("Concurrent Mark abort");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        GCConcPhaseTimer(_cm, "Concurrent Bitmap Clearing");
        _cm->cleanup_for_next_mark();
      } else {
        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts_join;
      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
      g1h->register_concurrent_cycle_end();
    }
  }
}

void ConcurrentMarkThread::stop_service() {
  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);

---- Updated version of the same code ----

  CMCleanUp(G1ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->cleanup();
  }
};

// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
  if (g1_policy->adaptive_young_list_length()) {
    double now = os::elapsedTime();
    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
                                  : g1_policy->predict_cleanup_time_ms();
    G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
    os::sleep(this, sleep_time_ms, false);
  }
}

class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
  G1ConcurrentMark* _cm;

 public:
  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
    GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
    _cm(cm) {
    _cm->register_concurrent_phase_start(title);
  }

  ~G1ConcPhaseTimer() {
    _cm->register_concurrent_phase_end();
  }
};
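
// Usage sketch added for illustration (not part of this file): like its
// predecessor, G1ConcPhaseTimer is meant to be used as a scoped guard, so a
// concurrent phase is bracketed simply by declaring a named local. The
// constructor registers the phase with G1ConcurrentMark (and the
// GCTraceConcTimeImpl base logs the phase start on the gc+marking tags); the
// destructor registers the phase end and logs the elapsed time:
//
//   {
//     G1ConcPhaseTimer t(_cm, "Concurrent Example Phase");  // hypothetical phase name
//     do_phase_work();                                      // hypothetical phase body
//   }   // phase end registered and duration logged here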

void ConcurrentMarkThread::run_service() {
  _vtime_start = os::elapsedVTime();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();

  while (!should_terminate()) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (should_terminate()) {
      _cm->root_regions()->cancel_scan();
      break;
    }

    assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");

    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
    {
      ResourceMark rm;
      HandleMark hm;
      double cycle_start = os::elapsedVTime();

      {
        G1ConcPhaseTimer t(_cm, "Concurrent Clear Claimed Marks");
        ClassLoaderDataGraph::clear_claimed_marks();
      }

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions being scanned which would be a
      // correctness issue.

      {
        G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions");
        _cm->scan_root_regions();
      }

      // It would be nice to use the GCTraceConcTime class here but
      // the "end" logging is inside the loop and not at the end of
      // a scope. Mimicking the same log output as GCTraceConcTime instead.
      jlong mark_start = os::elapsed_counter();
      log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));

      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
          _cm->mark_from_roots();
        }

        double mark_end_time = os::elapsedVTime();
        jlong mark_end = os::elapsed_counter();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
          delay_to_keep_mmu(g1_policy, true /* remark */);
          log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
                                TimeHelper::counter_to_seconds(mark_start),
                                TimeHelper::counter_to_seconds(mark_end),
                                TimeHelper::counter_to_millis(mark_end - mark_start));

          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
          log_info(gc, marking)("Concurrent Mark Restart due to overflow");
        }
      } while (cm()->restart_for_overflow());

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking. We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        delay_to_keep_mmu(g1_policy, false /* cleanup */);

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts_join;
        g1h->collector_state()->set_mark_in_progress(false);
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup");
        // Now do the concurrent cleanup operation.
        _cm->complete_cleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag) otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race before recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      {
        SuspendibleThreadSetJoiner sts_join;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        } else {
          log_info(gc, marking)("Concurrent Mark Abort");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        G1ConcPhaseTimer t(_cm, "Concurrent Cleanup for Next Mark");
        _cm->cleanup_for_next_mark();
      } else {
        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts_join;
      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
      g1h->register_concurrent_cycle_end();
    }
  }
}
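
// ---------------------------------------------------------------------------
// Illustrative, standalone sketch, not part of this file: the comment above
// increment_old_marking_cycles_completed() describes a Java thread that
// requested a collection (e.g. System.gc() with ExplicitGCInvokesConcurrent)
// waiting until the completed-cycle counter advances. The snippet below models
// that handshake with standard C++ primitives as stand-ins for HotSpot's
// FullGCCount_lock; all names are assumptions for illustration.
#include <condition_variable>  // for the standalone sketch only
#include <mutex>

static std::mutex              example_cycle_lock;
static std::condition_variable example_cycle_cv;
static unsigned int            example_cycles_completed = 0;

// Requesting thread: block until at least one more cycle has completed than
// had completed when the request was made.
static void example_wait_for_cycle(unsigned int completed_at_request) {
  std::unique_lock<std::mutex> l(example_cycle_lock);
  example_cycle_cv.wait(l, [&] { return example_cycles_completed > completed_at_request; });
}

// Concurrent mark thread at the end of a cycle: bump the counter and wake any
// waiters (the role increment_old_marking_cycles_completed() plays above).
static void example_signal_cycle_completed() {
  {
    std::lock_guard<std::mutex> l(example_cycle_lock);
    ++example_cycles_completed;
  }
  example_cycle_cv.notify_all();
}
// --------------------------- end of sketch ---------------------------------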

void ConcurrentMarkThread::stop_service() {
  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);