139
140 // We have to ensure that we finish scanning the root regions
141 // before the next GC takes place. To ensure this we have to
142 // make sure that we do not join the STS until the root regions
143 // have been scanned. If we did then it's possible that a
144 // subsequent GC could block us from joining the STS and proceed
145 // without the root regions having been scanned, which would be a
146 // correctness issue.
147
148 {
// Scope the phase timer so "Concurrent Scan Root Regions" is reported
// the moment root-region scanning finishes (timer stops at scope exit).
149 G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions");
150 _cm->scan_root_regions();
151 }
152
153 // It would be nice to use the GCTraceConcTime class here but
154 // the "end" logging is inside the loop and not at the end of
155 // a scope. Mimicking the same log output as GCTraceConcTime instead.
156 jlong mark_start = os::elapsed_counter();
157 log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
158
// Marking loop: run concurrent marking from the roots, then schedule the
// Remark safepoint. The loop repeats while the Remark pause reports a
// mark-stack overflow (marking is then incomplete and must be redone).
159 int iter = 0;
160 do {
161 iter++;
162 if (!cm()->has_aborted()) {
163 G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
164 _cm->mark_from_roots();
165 }
166
// Accumulate virtual (CPU) time spent since cycle_start into the
// marking vtime counter — presumably cycle_start marks the beginning
// of this concurrent cycle (declared outside this fragment; confirm).
167 double mark_end_time = os::elapsedVTime();
168 jlong mark_end = os::elapsed_counter();
169 _vtime_mark_accum += (mark_end_time - cycle_start);
170 if (!cm()->has_aborted()) {
// Delay the Remark pause if needed to respect the MMU goal, then
// execute Remark in the VM thread as a safepoint operation.
171 delay_to_keep_mmu(g1_policy, true /* remark */);
172 log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
173 TimeHelper::counter_to_seconds(mark_start),
174 TimeHelper::counter_to_seconds(mark_end),
175 TimeHelper::counter_to_millis(mark_end - mark_start));
176
177 CMCheckpointRootsFinalClosure final_cl(_cm);
178 VM_CGC_Operation op(&final_cl, "Pause Remark");
179 VMThread::execute(&op);
180 }
// NOTE(review): on an overflow restart this emits two log lines (one
// debug, one info) for the same event — likely only one is intended.
181 if (cm()->restart_for_overflow()) {
182 log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
183 log_info(gc, marking)("Concurrent Mark Restart due to overflow");
184 }
185 } while (cm()->restart_for_overflow());
186
// Unless the cycle was aborted (by a full GC), build the liveness data
// for the just-completed marking, concurrently.
187 if (!cm()->has_aborted()) {
188 G1ConcPhaseTimer t(_cm, "Concurrent Create Live Data");
189 cm()->create_live_data();
190 }
191
192 double end_time = os::elapsedVTime();
193 // Update the total virtual time before doing this, since it will try
194 // to measure it to get the vtime for this marking. We purposely
195 // neglect the presumably-short "completeCleanup" phase here.
196 _vtime_accum = (end_time - _vtime_start);
197
// Schedule the Cleanup pause — again MMU-delayed and executed as a
// safepoint operation in the VM thread — unless the cycle was aborted.
198 if (!cm()->has_aborted()) {
199 delay_to_keep_mmu(g1_policy, false /* cleanup */);
200
201 CMCleanUp cl_cl(_cm);
202 VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
203 VMThread::execute(&op);
204 } else {
205 // We don't want to update the marking status if a GC pause
|
139
140 // We have to ensure that we finish scanning the root regions
141 // before the next GC takes place. To ensure this we have to
142 // make sure that we do not join the STS until the root regions
143 // have been scanned. If we did then it's possible that a
144 // subsequent GC could block us from joining the STS and proceed
145 // without the root regions having been scanned, which would be a
146 // correctness issue.
147
148 {
// Scope the phase timer so "Concurrent Scan Root Regions" is reported
// the moment root-region scanning finishes (timer stops at scope exit).
149 G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions");
150 _cm->scan_root_regions();
151 }
152
153 // It would be nice to use the GCTraceConcTime class here but
154 // the "end" logging is inside the loop and not at the end of
155 // a scope. Mimicking the same log output as GCTraceConcTime instead.
156 jlong mark_start = os::elapsed_counter();
157 log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
158
159 for (uint iter = 1; true; ++iter) {
160 if (!cm()->has_aborted()) {
161 G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
162 _cm->mark_from_roots();
163 }
164
165 double mark_end_time = os::elapsedVTime();
166 jlong mark_end = os::elapsed_counter();
167 _vtime_mark_accum += (mark_end_time - cycle_start);
168 if (!cm()->has_aborted()) {
169 delay_to_keep_mmu(g1_policy, true /* remark */);
170 log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
171 TimeHelper::counter_to_seconds(mark_start),
172 TimeHelper::counter_to_seconds(mark_end),
173 TimeHelper::counter_to_millis(mark_end - mark_start));
174
175 CMCheckpointRootsFinalClosure final_cl(_cm);
176 VM_CGC_Operation op(&final_cl, "Pause Remark");
177 VMThread::execute(&op);
178 }
179
180 if (!cm()->restart_for_overflow() || cm()->has_aborted()) {
181 break;
182 }
183
184 log_info(gc, marking)("Concurrent Mark Restart due to overflow"
185 " (iteration #%u", iter);
186 }
187
// Unless the cycle was aborted (by a full GC), build the liveness data
// for the just-completed marking, concurrently.
188 if (!cm()->has_aborted()) {
189 G1ConcPhaseTimer t(_cm, "Concurrent Create Live Data");
190 cm()->create_live_data();
191 }
192
193 double end_time = os::elapsedVTime();
194 // Update the total virtual time before doing this, since it will try
195 // to measure it to get the vtime for this marking. We purposely
196 // neglect the presumably-short "completeCleanup" phase here.
197 _vtime_accum = (end_time - _vtime_start);
198
// Schedule the Cleanup pause — again MMU-delayed and executed as a
// safepoint operation in the VM thread — unless the cycle was aborted.
199 if (!cm()->has_aborted()) {
200 delay_to_keep_mmu(g1_policy, false /* cleanup */);
201
202 CMCleanUp cl_cl(_cm);
203 VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
204 VMThread::execute(&op);
205 } else {
206 // We don't want to update the marking status if a GC pause
|