// NOTE(review): This is not compilable C++. It is a flattened side-by-side
// dump of two revisions of HotSpot's G1 ConcurrentMarkThread::run() and
// ConcurrentMarkThread::run_service(), with the original source line numbers
// (106-166) embedded in the text and the old revision separated from the new
// one by a '|' character. BOTH copies of run_service() are truncated at
// original line 166, inside the "do { ... }" marking loop, so neither
// function body is complete below.
//
// Old revision, original lines 106-150 (continues on the next physical line):
106 107 void ConcurrentMarkThread::run() { 108 initialize_in_thread(); 109 wait_for_universe_init(); 110 111 run_service(); 112 113 terminate(); 114 } 115 116 void ConcurrentMarkThread::run_service() { 117 _vtime_start = os::elapsedVTime(); 118 119 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 120 G1CollectorPolicy* g1_policy = g1h->g1_policy(); 121 122 while (!_should_terminate) { 123 // wait until started is set. 124 sleepBeforeNextCycle(); 125 if (_should_terminate) { 126 break; 127 } 128 129 assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC."); 130 { 131 ResourceMark rm; 132 HandleMark hm; 133 double cycle_start = os::elapsedVTime(); 134 135 // We have to ensure that we finish scanning the root regions 136 // before the next GC takes place. To ensure this we have to 137 // make sure that we do not join the STS until the root regions 138 // have been scanned. If we did then it's possible that a 139 // subsequent GC could block us from joining the STS and proceed 140 // without the root regions have been scanned which would be a 141 // correctness issue. 142 143 if (!cm()->has_aborted()) { 144 GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning"); 145 _cm->scanRootRegions(); 146 } 147 148 // It would be nice to use the GCTraceConcTime class here but 149 // the "end" logging is inside the loop and not at the end of 150 // a scope. Mimicking the same log output as GCTraceConcTime instead. 
// Old revision continued (151-166, truncated mid-loop), then the '|'
// separator, then the new revision restarts at its own line 106.
// Visible difference in the new revision's termination path:
//   - new line 126: "_cm->root_regions()->cancel_scan();" now runs before
//     "break" when _should_terminate is set, so a termination request also
//     cancels a pending root-region scan instead of leaving it outstanding.
151 jlong mark_start = os::elapsed_counter(); 152 log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start)); 153 154 int iter = 0; 155 do { 156 iter++; 157 if (!cm()->has_aborted()) { 158 GCConcPhaseTimer(_cm, "Concurrent Mark"); 159 _cm->markFromRoots(); 160 } 161 162 double mark_end_time = os::elapsedVTime(); 163 jlong mark_end = os::elapsed_counter(); 164 _vtime_mark_accum += (mark_end_time - cycle_start); 165 if (!cm()->has_aborted()) { 166 delay_to_keep_mmu(g1_policy, true /* remark */); | 106 107 void ConcurrentMarkThread::run() { 108 initialize_in_thread(); 109 wait_for_universe_init(); 110 111 run_service(); 112 113 terminate(); 114 } 115 116 void ConcurrentMarkThread::run_service() { 117 _vtime_start = os::elapsedVTime(); 118 119 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 120 G1CollectorPolicy* g1_policy = g1h->g1_policy(); 121 122 while (!_should_terminate) { 123 // wait until started is set. 124 sleepBeforeNextCycle(); 125 if (_should_terminate) { 126 _cm->root_regions()->cancel_scan(); 127 break; 128 } 129 130 assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC."); 131 { 132 ResourceMark rm; 133 HandleMark hm; 134 double cycle_start = os::elapsedVTime(); 135 136 // We have to ensure that we finish scanning the root regions 137 // before the next GC takes place. To ensure this we have to 138 // make sure that we do not join the STS until the root regions 139 // have been scanned. If we did then it's possible that a 140 // subsequent GC could block us from joining the STS and proceed 141 // without the root regions have been scanned which would be a 142 // correctness issue. 
// New revision continued (143-166, truncated mid-loop). Second visible
// difference: the old "if (!cm()->has_aborted())" guard around root-region
// scanning is replaced by an assert ("Aborting before root region scanning
// is finished not supported."), and _cm->scanRootRegions() is now called
// unconditionally.
//
// NOTE(review): in BOTH revisions the statement
//   GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
// (and likewise the "Concurrent Mark" one) constructs an UNNAMED temporary
// that is destroyed at the end of that full-expression. Presumably
// GCConcPhaseTimer is a scoped RAII phase timer — if so, it never brackets
// the following _cm->scanRootRegions() / _cm->markFromRoots() call and times
// nothing. It should be a named local, e.g.
//   GCConcPhaseTimer timer(_cm, "Concurrent Root Region Scanning");
// — confirm against the GCConcPhaseTimer class definition.
143 144 assert(!cm()->has_aborted(), "Aborting before root region scanning is finished not supported."); 145 GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning"); 146 _cm->scanRootRegions(); 147 148 // It would be nice to use the GCTraceConcTime class here but 149 // the "end" logging is inside the loop and not at the end of 150 // a scope. Mimicking the same log output as GCTraceConcTime instead. 151 jlong mark_start = os::elapsed_counter(); 152 log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start)); 153 154 int iter = 0; 155 do { 156 iter++; 157 if (!cm()->has_aborted()) { 158 GCConcPhaseTimer(_cm, "Concurrent Mark"); 159 _cm->markFromRoots(); 160 } 161 162 double mark_end_time = os::elapsedVTime(); 163 jlong mark_end = os::elapsed_counter(); 164 _vtime_mark_accum += (mark_end_time - cycle_start); 165 if (!cm()->has_aborted()) { 166 delay_to_keep_mmu(g1_policy, true /* remark */); |