73
74 CMCleanUp(ConcurrentMark* cm) :
75 _cm(cm) {}
76
77 void do_void(){
78 _cm->cleanup();
79 }
80 };
81
82 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
83 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
84 if (g1_policy->adaptive_young_list_length()) {
85 double now = os::elapsedTime();
86 double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
87 : g1_policy->predict_cleanup_time_ms();
88 G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
89 jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
90 os::sleep(this, sleep_time_ms, false);
91 }
92 }
93 void ConcurrentMarkThread::run() {
94 initialize_in_thread();
95 wait_for_universe_init();
96
97 run_service();
98
99 terminate();
100 }
101
// Main service loop of the concurrent mark thread. Each iteration is one
// concurrent marking cycle: wait until a cycle is started, scan root
// regions, mark from roots (restarting on mark-stack overflow), schedule
// the Remark pause on the VM thread, finish concurrent cleanup, and
// finally clear the next marking bitmap for the following cycle.
// NOTE(review): every line below carries a stray embedded line number from
// a paste artifact, and the function body has gaps where source lines are
// missing — see the inline NOTE comments.
102 void ConcurrentMarkThread::run_service() {
103 _vtime_start = os::elapsedVTime();
104
105 G1CollectedHeap* g1h = G1CollectedHeap::heap();
106 G1CollectorPolicy* g1_policy = g1h->g1_policy();
107
108 while (!_should_terminate) {
109 // wait until started is set.
110 sleepBeforeNextCycle();
111 if (_should_terminate) {
112 break;
113 }
114
115 assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
116 {
117 ResourceMark rm;
118 HandleMark hm;
119 double cycle_start = os::elapsedVTime();
120
121 // We have to ensure that we finish scanning the root regions
122 // before the next GC takes place. To ensure this we have to
123 // make sure that we do not join the STS until the root regions
124 // have been scanned. If we did then it's possible that a
125 // subsequent GC could block us from joining the STS and proceed
126 // without the root regions have been scanned which would be a
127 // correctness issue.
128
129 if (!cm()->has_aborted()) {
130 _cm->scanRootRegions();
131 }
132
133 // It would be nice to use the GCTraceConcTime class here but
134 // the "end" logging is inside the loop and not at the end of
135 // a scope. Mimicking the same log output as GCTraceConcTime instead.
136 jlong mark_start = os::elapsed_counter();
137 log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
138
// iter counts marking attempts, including restarts forced by mark-stack
// overflow during Remark.
139 int iter = 0;
140 do {
141 iter++;
142 if (!cm()->has_aborted()) {
143 _cm->markFromRoots();
144 }
145
146 double mark_end_time = os::elapsedVTime();
147 jlong mark_end = os::elapsed_counter();
148 _vtime_mark_accum += (mark_end_time - cycle_start);
149 if (!cm()->has_aborted()) {
// Delay the Remark pause if needed to honor the MMU target.
150 delay_to_keep_mmu(g1_policy, true /* remark */);
151 log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
152 TimeHelper::counter_to_seconds(mark_start),
153 TimeHelper::counter_to_seconds(mark_end),
154 TimeHelper::counter_to_millis(mark_end - mark_start));
155
// Schedule the Remark pause as a VM operation executed by the VM thread.
156 CMCheckpointRootsFinalClosure final_cl(_cm);
157 VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
158 VMThread::execute(&op);
159 }
160 if (cm()->restart_for_overflow()) {
161 log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
162 log_info(gc)("Concurrent Mark restart for overflow");
// NOTE(review): original source lines 163-176 are missing from this view;
// the declaration of the 'op' executed below (presumably the Cleanup
// VM operation) is not visible here — confirm against the full file.
177 VMThread::execute(&op);
178 } else {
179 // We don't want to update the marking status if a GC pause
180 // is already underway.
181 SuspendibleThreadSetJoiner sts_join;
182 g1h->collector_state()->set_mark_in_progress(false);
183 }
184
185 // Check if cleanup set the free_regions_coming flag. If it
186 // hasn't, we can just skip the next step.
187 if (g1h->free_regions_coming()) {
188 // The following will finish freeing up any regions that we
189 // found to be empty during cleanup. We'll do this part
190 // without joining the suspendible set. If an evacuation pause
191 // takes place, then we would carry on freeing regions in
192 // case they are needed by the pause. If a Full GC takes
193 // place, it would wait for us to process the regions
194 // reclaimed by cleanup.
195
196 GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
197
198 // Now do the concurrent cleanup operation.
199 _cm->completeCleanup();
200
201 // Notify anyone who's waiting that there are no more free
202 // regions coming. We have to do this before we join the STS
203 // (in fact, we should not attempt to join the STS in the
204 // interval between finishing the cleanup pause and clearing
205 // the free_regions_coming flag) otherwise we might deadlock:
206 // a GC worker could be blocked waiting for the notification
207 // whereas this thread will be blocked for the pause to finish
208 // while it's trying to join the STS, which is conditional on
209 // the GC workers finishing.
210 g1h->reset_free_regions_coming();
211 }
212 guarantee(cm()->cleanup_list_is_empty(),
213 "at this point there should be no regions on the cleanup list");
214
215 // There is a tricky race before recording that the concurrent
216 // cleanup has completed and a potential Full GC starting around
// NOTE(review): original source lines 217-232 (the middle of this comment
// block) are missing from this view.
233 // joining the STS until the Full GC is done, which means that
234 // abort() will have completed and has_aborted() will return
235 // true to prevent us from calling
236 // record_concurrent_mark_cleanup_completed() (and, in fact, it's
237 // not needed any more as the concurrent mark state has been
238 // already reset).
239 {
240 SuspendibleThreadSetJoiner sts_join;
241 if (!cm()->has_aborted()) {
242 g1_policy->record_concurrent_mark_cleanup_completed();
243 } else {
244 log_info(gc)("Concurrent Mark abort");
245 }
246 }
247
248 // We now want to allow clearing of the marking bitmap to be
249 // suspended by a collection pause.
250 // We may have aborted just before the remark. Do not bother clearing the
251 // bitmap then, as it has been done during mark abort.
252 if (!cm()->has_aborted()) {
253 _cm->clearNextBitmap();
254 } else {
255 assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
256 }
257 }
258
259 // Update the number of full collections that have been
260 // completed. This will also notify the FullGCCount_lock in case a
261 // Java thread is waiting for a full GC to happen (e.g., it
262 // called System.gc() with +ExplicitGCInvokesConcurrent).
263 {
264 SuspendibleThreadSetJoiner sts_join;
265 g1h->increment_old_marking_cycles_completed(true /* concurrent */);
266 g1h->register_concurrent_cycle_end();
267 }
268 }
269 }
270
271 void ConcurrentMarkThread::stop() {
272 {
|
73
74 CMCleanUp(ConcurrentMark* cm) :
75 _cm(cm) {}
76
77 void do_void(){
78 _cm->cleanup();
79 }
80 };
81
82 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
83 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
84 if (g1_policy->adaptive_young_list_length()) {
85 double now = os::elapsedTime();
86 double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
87 : g1_policy->predict_cleanup_time_ms();
88 G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
89 jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
90 os::sleep(this, sleep_time_ms, false);
91 }
92 }
93
94 class GCConcPhaseTimer : StackObj {
95 ConcurrentMark* _cm;
96
97 public:
98 GCConcPhaseTimer(ConcurrentMark* cm, const char* title) : _cm(cm) {
99 _cm->register_concurrent_phase_start(title);
100 }
101
102 ~GCConcPhaseTimer() {
103 _cm->register_concurrent_phase_end();
104 }
105 };
106
107 void ConcurrentMarkThread::run() {
108 initialize_in_thread();
109 wait_for_universe_init();
110
111 run_service();
112
113 terminate();
114 }
115
// Main service loop of the concurrent mark thread. Each iteration is one
// concurrent marking cycle: wait until a cycle is started, scan root
// regions, mark from roots (restarting on mark-stack overflow), schedule
// the Remark pause on the VM thread, finish concurrent cleanup, and
// finally clear the next marking bitmap for the following cycle.
// NOTE(review): every line below carries a stray embedded line number from
// a paste artifact, and the function body has gaps where source lines are
// missing — see the inline NOTE comments.
116 void ConcurrentMarkThread::run_service() {
117 _vtime_start = os::elapsedVTime();
118
119 G1CollectedHeap* g1h = G1CollectedHeap::heap();
120 G1CollectorPolicy* g1_policy = g1h->g1_policy();
121
122 while (!_should_terminate) {
123 // wait until started is set.
124 sleepBeforeNextCycle();
125 if (_should_terminate) {
126 break;
127 }
128
129 assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
130 {
131 ResourceMark rm;
132 HandleMark hm;
133 double cycle_start = os::elapsedVTime();
134
135 // We have to ensure that we finish scanning the root regions
136 // before the next GC takes place. To ensure this we have to
137 // make sure that we do not join the STS until the root regions
138 // have been scanned. If we did then it's possible that a
139 // subsequent GC could block us from joining the STS and proceed
140 // without the root regions have been scanned which would be a
141 // correctness issue.
142
143 if (!cm()->has_aborted()) {
// BUG(review): this constructs an UNNAMED temporary which is destroyed at
// the end of this statement — the phase is registered as started and ended
// immediately, and does NOT time scanRootRegions(). Should be:
//   GCConcPhaseTimer timer(_cm, "Concurrent Root Region Scanning");
144 GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
145 _cm->scanRootRegions();
146 }
147
148 // It would be nice to use the GCTraceConcTime class here but
149 // the "end" logging is inside the loop and not at the end of
150 // a scope. Mimicking the same log output as GCTraceConcTime instead.
151 jlong mark_start = os::elapsed_counter();
152 log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
153
// iter counts marking attempts, including restarts forced by mark-stack
// overflow during Remark.
154 int iter = 0;
155 do {
156 iter++;
157 if (!cm()->has_aborted()) {
// BUG(review): unnamed temporary again — the timer does not span
// markFromRoots(). Should be: GCConcPhaseTimer timer(_cm, "Concurrent Mark");
158 GCConcPhaseTimer(_cm, "Concurrent Mark");
159 _cm->markFromRoots();
160 }
161
162 double mark_end_time = os::elapsedVTime();
163 jlong mark_end = os::elapsed_counter();
164 _vtime_mark_accum += (mark_end_time - cycle_start);
165 if (!cm()->has_aborted()) {
// Delay the Remark pause if needed to honor the MMU target.
166 delay_to_keep_mmu(g1_policy, true /* remark */);
167 log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
168 TimeHelper::counter_to_seconds(mark_start),
169 TimeHelper::counter_to_seconds(mark_end),
170 TimeHelper::counter_to_millis(mark_end - mark_start));
171
// Schedule the Remark pause as a VM operation executed by the VM thread.
172 CMCheckpointRootsFinalClosure final_cl(_cm);
173 VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
174 VMThread::execute(&op);
175 }
176 if (cm()->restart_for_overflow()) {
177 log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
178 log_info(gc)("Concurrent Mark restart for overflow");
// NOTE(review): original source lines 179-192 are missing from this view;
// the declaration of the 'op' executed below (presumably the Cleanup
// VM operation) is not visible here — confirm against the full file.
193 VMThread::execute(&op);
194 } else {
195 // We don't want to update the marking status if a GC pause
196 // is already underway.
197 SuspendibleThreadSetJoiner sts_join;
198 g1h->collector_state()->set_mark_in_progress(false);
199 }
200
201 // Check if cleanup set the free_regions_coming flag. If it
202 // hasn't, we can just skip the next step.
203 if (g1h->free_regions_coming()) {
204 // The following will finish freeing up any regions that we
205 // found to be empty during cleanup. We'll do this part
206 // without joining the suspendible set. If an evacuation pause
207 // takes place, then we would carry on freeing regions in
208 // case they are needed by the pause. If a Full GC takes
209 // place, it would wait for us to process the regions
210 // reclaimed by cleanup.
211
212 GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
// BUG(review): unlike 'tt' above, this is an unnamed temporary destroyed
// immediately — it does not time completeCleanup(). Should be:
//   GCConcPhaseTimer timer(_cm, "Concurrent Cleanup");
213 GCConcPhaseTimer(_cm, "Concurrent Cleanup");
214
215 // Now do the concurrent cleanup operation.
216 _cm->completeCleanup();
217
218 // Notify anyone who's waiting that there are no more free
219 // regions coming. We have to do this before we join the STS
220 // (in fact, we should not attempt to join the STS in the
221 // interval between finishing the cleanup pause and clearing
222 // the free_regions_coming flag) otherwise we might deadlock:
223 // a GC worker could be blocked waiting for the notification
224 // whereas this thread will be blocked for the pause to finish
225 // while it's trying to join the STS, which is conditional on
226 // the GC workers finishing.
227 g1h->reset_free_regions_coming();
228 }
229 guarantee(cm()->cleanup_list_is_empty(),
230 "at this point there should be no regions on the cleanup list");
231
232 // There is a tricky race before recording that the concurrent
233 // cleanup has completed and a potential Full GC starting around
// NOTE(review): original source lines 234-249 (the middle of this comment
// block) are missing from this view.
250 // joining the STS until the Full GC is done, which means that
251 // abort() will have completed and has_aborted() will return
252 // true to prevent us from calling
253 // record_concurrent_mark_cleanup_completed() (and, in fact, it's
254 // not needed any more as the concurrent mark state has been
255 // already reset).
256 {
257 SuspendibleThreadSetJoiner sts_join;
258 if (!cm()->has_aborted()) {
259 g1_policy->record_concurrent_mark_cleanup_completed();
260 } else {
261 log_info(gc)("Concurrent Mark abort");
262 }
263 }
264
265 // We now want to allow clearing of the marking bitmap to be
266 // suspended by a collection pause.
267 // We may have aborted just before the remark. Do not bother clearing the
268 // bitmap then, as it has been done during mark abort.
269 if (!cm()->has_aborted()) {
// BUG(review): unnamed temporary — does not time clearNextBitmap().
// Should be: GCConcPhaseTimer timer(_cm, "Concurrent Bitmap Clearing");
270 GCConcPhaseTimer(_cm, "Concurrent Bitmap Clearing");
271 _cm->clearNextBitmap();
272 } else {
273 assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
274 }
275 }
276
277 // Update the number of full collections that have been
278 // completed. This will also notify the FullGCCount_lock in case a
279 // Java thread is waiting for a full GC to happen (e.g., it
280 // called System.gc() with +ExplicitGCInvokesConcurrent).
281 {
282 SuspendibleThreadSetJoiner sts_join;
283 g1h->increment_old_marking_cycles_completed(true /* concurrent */);
284 g1h->register_concurrent_cycle_end();
285 }
286 }
287 }
288
289 void ConcurrentMarkThread::stop() {
290 {
|