< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp

Print this page
rev 60794 : imported patch 8247928-sjohanss-review


 100 
 101   if (policy->use_adaptive_young_list_length()) {
 102     double delay_end_sec = mmu_delay_end(policy, remark);
 103     // Wait for timeout or thread termination request.
 104     MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag);
 105     while (!_cm->has_aborted() && !should_terminate()) {
 106       double sleep_time_sec = (delay_end_sec - os::elapsedTime());
 107       jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS);
 108       if (sleep_time_ms <= 0) {
 109         break;                  // Passed end time.
 110       } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) {
 111         break;                  // Timeout => reached end time.
 112       }
 113       // Other (possibly spurious) wakeup.  Retry with updated sleep time.
 114     }
 115   }
 116 }
 117 
     // RAII timer for one concurrent marking phase: logs the phase start/end
     // with the given title at Info level on the (gc, marking) log tags, and
     // brackets the phase with register_gc_concurrent_start()/_end() on the
     // concurrent-mark GC timer so it appears in GC event reporting.
 118 class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
 119   G1ConcurrentMark* _cm;
     // NOTE(review): _t is never referenced in this class body — appears to be
     // dead state that can be removed.
 120   const char* _t;
 121  public:
 122   G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
 123     GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
 124     _cm(cm)
 125   {
 126     _cm->gc_timer_cm()->register_gc_concurrent_start(title);
 127   }
 128 
 129   ~G1ConcPhaseTimer() {
 130     _cm->gc_timer_cm()->register_gc_concurrent_end();
 131   }
 132 };
 133 
 134 void G1ConcurrentMarkThread::run_service() {
 135   _vtime_start = os::elapsedVTime();
 136 
 137   while (!should_terminate()) {
 138     if (wait_for_next_cycle()) {
 139       break;
 140     }


 166 
 167   if (started()) {
 168     set_in_progress();
 169   }
 170 
 171   return should_terminate();
 172 }
 173 
     // Concurrent phase: reset the "claimed" marks on all ClassLoaderData so
     // the upcoming marking can claim CLDs afresh.
     // Returns true iff marking has been aborted.
 174 bool G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() {
 175   G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
 176   ClassLoaderDataGraph::clear_claimed_marks();
 177   return _cm->has_aborted();
 178 }
 179 
     // Concurrent phase: scan the root regions.  The caller must not bail out
     // of the cycle before this phase has run, since a GC waits for a
     // notification from it (see full_concurrent_cycle_do).
     // Returns true iff marking has been aborted.
 180 bool G1ConcurrentMarkThread::phase_scan_root_regions() {
 181   G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
 182   _cm->scan_root_regions();
 183   return _cm->has_aborted();
 184 }
 185 
     // Concurrent phase: the main marking work, tracing the object graph from
     // the roots.  Hits the "AFTER MARKING STARTED" concurrent-GC breakpoint
     // first so tests can synchronize with this point in the cycle.
     // Returns true iff marking has been aborted.
 186 bool G1ConcurrentMarkThread::phase_mark_from_roots() {



 187   ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
 188   G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
 189   _cm->mark_from_roots();
 190   return _cm->has_aborted();
 191 }
 192 
     // Concurrent phase: precleaning (only run when G1UseReferencePrecleaning
     // is enabled — see the caller).
     // Returns true iff marking has been aborted.
 193 bool G1ConcurrentMarkThread::phase_preclean() {
 194   G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
 195   _cm->preclean();
 196   return _cm->has_aborted();
 197 }
 198 
     // Delay before the Remark pause to keep the MMU (minimum mutator
     // utilization) goal.  Returns true iff marking has been aborted.
 199 bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_remark() {
 200   delay_to_keep_mmu(true /* remark */);
 201   return _cm->has_aborted();
 202 }
 203 
     // Safepoint phase: execute the Remark pause in the VM thread.  On return,
     // has_overflown tells the caller whether the global mark stack overflowed
     // during remark (in which case marking must be restarted).
     // Returns true iff marking has been aborted.
 204 bool G1ConcurrentMarkThread::phase_remark(bool& has_overflown) {
     // Breakpoint for tests to synchronize just before marking completes.
 205   ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
 206   CMRemark cl(_cm);
 207   VM_G1Concurrent op(&cl, "Pause Remark");
 208   VMThread::execute(&op);
 209   has_overflown = _cm->has_overflown();
 210   return _cm->has_aborted();
 211 }
 212 
     // Concurrent phase: rebuild the remembered sets concurrently with the
     // application.  Returns true iff marking has been aborted.
 213 bool G1ConcurrentMarkThread::phase_rebuild_remembered_sets() {
 214   G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
 215   _cm->rebuild_rem_set_concurrently();
 216   return _cm->has_aborted();
 217 }
 218 
     // Delay before the Cleanup pause to keep the MMU (minimum mutator
     // utilization) goal.  Returns true iff marking has been aborted.
 219 bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_cleanup() {
 220   delay_to_keep_mmu(false /* cleanup */);
 221   return _cm->has_aborted();
 222 }
 223 
     // Safepoint phase: execute the Cleanup pause in the VM thread.
     // Returns true iff marking has been aborted.
 224 bool G1ConcurrentMarkThread::phase_cleanup() {
 225   CMCleanup cl(_cm);
 226   VM_G1Concurrent op(&cl, "Pause Cleanup");
 227   VMThread::execute(&op);
 228   return _cm->has_aborted();
 229 }


 234   return _cm->has_aborted();
 235 }
 236 
     // Forward cycle-start bookkeeping to G1ConcurrentMark.
 237 void G1ConcurrentMarkThread::concurrent_cycle_start() {
 238   _cm->concurrent_cycle_start();
 239 }
 240 
     // Run one full concurrent marking cycle, phase by phase.  Each phase_*
     // helper returns true when marking has been aborted, in which case the
     // remaining phases are skipped — except that we never return before the
     // root region scan has completed (see comment at Phase 2).  Phases 3-6
     // repeat while the Remark pause reports a mark stack overflow.
 241 void G1ConcurrentMarkThread::full_concurrent_cycle_do() {
 242   HandleMark hm(Thread::current());
 243   ResourceMark rm;
 244 
 245   // Phase 1: Clear CLD claimed marks.
 246   phase_clear_cld_claimed_marks();
 247 
 248   // Do not return before the scan root regions phase as a GC waits for a
 249   // notification from it.
 250 
 251   // Phase 2: Scan root regions.
 252   if (phase_scan_root_regions()) return;
 253 
 254   Ticks mark_start = Ticks::now();
 255   log_info(gc, marking)("Concurrent Mark (%.3fs)", mark_start.seconds());
 256 
 257   bool needs_restart;
 258   uint iter = 1;
 259   do {
 260     // Phase 3: Mark From Roots.
 261     if (phase_mark_from_roots()) return;
 262 
 263     // Phase 4: Preclean (optional)
 264     if (G1UseReferencePrecleaning) {
 265       if (phase_preclean()) return;
 266     }
 267 
 268     // Phase 5: Wait for Remark.
 269     if (phase_delay_to_keep_mmu_before_remark()) return;
 270 
 271     // Phase 6: Remark pause
 272     if (phase_remark(needs_restart)) return;
 273     if (needs_restart) {
 274       log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
 275                             iter++);
 276     }
 277   } while (needs_restart);
 278 
 279   Ticks mark_end = Ticks::now();
 280   log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
 281                         mark_start.seconds(), mark_end.seconds(),
 282                         (mark_end - mark_start).seconds() * 1000.0);
 283 
 284   // Phase 7: Rebuild remembered sets.
 285   if (phase_rebuild_remembered_sets()) return;
 286 
 287   // Phase 8: Wait for Cleanup.
 288   if (phase_delay_to_keep_mmu_before_cleanup()) return;
 289 
 290   // Phase 9: Cleanup pause
 291   if (phase_cleanup()) return;
 292 
 293   // Phase 10: Clear bitmap for next mark.
 294   phase_clear_bitmap_for_next_mark();
 295 }
 296 
     // Epilogue of a concurrent cycle: bump the completed-old-marking-cycle
     // counter (passing whether the cycle finished without aborting), finish
     // cycle bookkeeping in G1ConcurrentMark, and tell the concurrent-GC
     // breakpoint mechanism that this thread went idle.
 297 void G1ConcurrentMarkThread::concurrent_cycle_end() {
 298   // Update the number of full collections that have been
 299   // completed. This will also notify the G1OldGCCount_lock in case a
 300   // Java thread is waiting for a full GC to happen (e.g., it
 301   // called System.gc() with +ExplicitGCInvokesConcurrent).
 302   SuspendibleThreadSetJoiner sts_join;
 303   G1CollectedHeap::heap()->increment_old_marking_cycles_completed(true /* concurrent */,
 304                                                                   !_cm->has_aborted());
 305 
 306   _cm->concurrent_cycle_end();
 307   ConcurrentGCBreakpoints::notify_active_to_idle();
 308 }


 100 
 101   if (policy->use_adaptive_young_list_length()) {
 102     double delay_end_sec = mmu_delay_end(policy, remark);
 103     // Wait for timeout or thread termination request.
 104     MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag);
 105     while (!_cm->has_aborted() && !should_terminate()) {
 106       double sleep_time_sec = (delay_end_sec - os::elapsedTime());
 107       jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS);
 108       if (sleep_time_ms <= 0) {
 109         break;                  // Passed end time.
 110       } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) {
 111         break;                  // Timeout => reached end time.
 112       }
 113       // Other (possibly spurious) wakeup.  Retry with updated sleep time.
 114     }
 115   }
 116 }
 117 
     // RAII timer for one concurrent marking phase: logs the phase start/end
     // with the given title at Info level on the (gc, marking) log tags, and
     // brackets the phase with register_gc_concurrent_start()/_end() on the
     // concurrent-mark GC timer so it appears in GC event reporting.
 118 class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
 119   G1ConcurrentMark* _cm;
 120 
 121  public:
 122   G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
 123     GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
 124     _cm(cm)
 125   {
 126     _cm->gc_timer_cm()->register_gc_concurrent_start(title);
 127   }
 128 
 129   ~G1ConcPhaseTimer() {
 130     _cm->gc_timer_cm()->register_gc_concurrent_end();
 131   }
 132 };
 133 
 134 void G1ConcurrentMarkThread::run_service() {
 135   _vtime_start = os::elapsedVTime();
 136 
 137   while (!should_terminate()) {
 138     if (wait_for_next_cycle()) {
 139       break;
 140     }


 166 
 167   if (started()) {
 168     set_in_progress();
 169   }
 170 
 171   return should_terminate();
 172 }
 173 
     // Concurrent phase: reset the "claimed" marks on all ClassLoaderData so
     // the upcoming marking can claim CLDs afresh.
     // Returns true iff marking has been aborted.
 174 bool G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() {
 175   G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
 176   ClassLoaderDataGraph::clear_claimed_marks();
 177   return _cm->has_aborted();
 178 }
 179 
     // Concurrent phase: scan the root regions.  The caller must not bail out
     // of the cycle before this phase has run, since a GC waits for a
     // notification from it (see full_concurrent_cycle_do).
     // Returns true iff marking has been aborted.
 180 bool G1ConcurrentMarkThread::phase_scan_root_regions() {
 181   G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
 182   _cm->scan_root_regions();
 183   return _cm->has_aborted();
 184 }
 185 
     // The actual marking loop (Phase 3 of the concurrent cycle): repeat
     // mark-from-roots, optional precleaning, the MMU delay and the Remark
     // pause until Remark completes without a mark stack overflow.  Logs
     // overall mark start/end times and restart iterations.
     // Returns true iff marking was aborted in any subphase.
 186 bool G1ConcurrentMarkThread::phase_mark_loop() {
 187   Ticks mark_start = Ticks::now();
 188   log_info(gc, marking)("Concurrent Mark (%.3fs)", mark_start.seconds());
 189 
 190   uint iter = 1;
 191   while (true) {
 192     // Subphase 1: Mark From Roots.
 193     if (subphase_mark_from_roots()) return true;
 194 
 195     // Subphase 2: Preclean (optional)
 196     if (G1UseReferencePrecleaning) {
 197       if (subphase_preclean()) return true;
 198     }
 199 
 200     // Subphase 3: Wait for Remark.
 201     if (subphase_delay_to_keep_mmu_before_remark()) return true;
 202 
 203     // Subphase 4: Remark pause
 204     if (subphase_remark()) return true;
 205 
 206     // Check if we need to restart the marking loop.
 207     if (!mark_loop_needs_restart()) break;
 208 
 209     log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
 210                           iter++);
 211   }
 212 
 213   Ticks mark_end = Ticks::now();
 214   log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
 215                         mark_start.seconds(), mark_end.seconds(),
 216                         (mark_end - mark_start).seconds() * 1000.0);
 217 
 218   return false;
 219 }
 220 
     // The mark loop must be restarted when the global mark stack overflowed
     // during the Remark pause.
 221 bool G1ConcurrentMarkThread::mark_loop_needs_restart() const {
 222   return _cm->has_overflown();
 223 }
 224 
     // Concurrent subphase: the main marking work, tracing the object graph
     // from the roots.  Hits the "AFTER MARKING STARTED" concurrent-GC
     // breakpoint first so tests can synchronize with this point.
     // Returns true iff marking has been aborted.
 225 bool G1ConcurrentMarkThread::subphase_mark_from_roots() {
 226   ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
 227   G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
 228   _cm->mark_from_roots();
 229   return _cm->has_aborted();
 230 }
 231 
     // Concurrent subphase: precleaning (only run when
     // G1UseReferencePrecleaning is enabled — see phase_mark_loop).
     // Returns true iff marking has been aborted.
 232 bool G1ConcurrentMarkThread::subphase_preclean() {
 233   G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
 234   _cm->preclean();
 235   return _cm->has_aborted();
 236 }
 237 
     // Delay before the Remark pause to keep the MMU (minimum mutator
     // utilization) goal.  Returns true iff marking has been aborted.
 238 bool G1ConcurrentMarkThread::subphase_delay_to_keep_mmu_before_remark() {
 239   delay_to_keep_mmu(true /* remark */);
 240   return _cm->has_aborted();
 241 }
 242 
     // Safepoint subphase: execute the Remark pause in the VM thread.  Whether
     // the mark stack overflowed (and the loop must restart) is queried
     // separately via mark_loop_needs_restart().
     // Returns true iff marking has been aborted.
 243 bool G1ConcurrentMarkThread::subphase_remark() {
     // Breakpoint for tests to synchronize just before marking completes.
 244   ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
 245   CMRemark cl(_cm);
 246   VM_G1Concurrent op(&cl, "Pause Remark");
 247   VMThread::execute(&op);

 248   return _cm->has_aborted();
 249 }
 250 
     // Concurrent phase: rebuild the remembered sets concurrently with the
     // application.  Returns true iff marking has been aborted.
 251 bool G1ConcurrentMarkThread::phase_rebuild_remembered_sets() {
 252   G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
 253   _cm->rebuild_rem_set_concurrently();
 254   return _cm->has_aborted();
 255 }
 256 
     // Delay before the Cleanup pause to keep the MMU (minimum mutator
     // utilization) goal.  Returns true iff marking has been aborted.
 257 bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_cleanup() {
 258   delay_to_keep_mmu(false /* cleanup */);
 259   return _cm->has_aborted();
 260 }
 261 
     // Safepoint phase: execute the Cleanup pause in the VM thread.
     // Returns true iff marking has been aborted.
 262 bool G1ConcurrentMarkThread::phase_cleanup() {
 263   CMCleanup cl(_cm);
 264   VM_G1Concurrent op(&cl, "Pause Cleanup");
 265   VMThread::execute(&op);
 266   return _cm->has_aborted();
 267 }


 272   return _cm->has_aborted();
 273 }
 274 
     // Forward cycle-start bookkeeping to G1ConcurrentMark.
 275 void G1ConcurrentMarkThread::concurrent_cycle_start() {
 276   _cm->concurrent_cycle_start();
 277 }
 278 
     // Run one full concurrent marking cycle, phase by phase.  Each phase_*
     // helper returns true when marking has been aborted, in which case the
     // remaining phases are skipped — except that we never return before the
     // root region scan has completed (see comment at Phase 2).  The restart-
     // on-overflow logic lives inside phase_mark_loop().
 279 void G1ConcurrentMarkThread::full_concurrent_cycle_do() {
 280   HandleMark hm(Thread::current());
 281   ResourceMark rm;
 282 
 283   // Phase 1: Clear CLD claimed marks.
 284   phase_clear_cld_claimed_marks();
 285 
 286   // Do not return before the scan root regions phase as a GC waits for a
 287   // notification from it.
 288 
 289   // Phase 2: Scan root regions.
 290   if (phase_scan_root_regions()) return;
 291 
 292   // Phase 3: Actual mark loop.
 293   if (phase_mark_loop()) return;



 294 
 295   // Phase 4: Rebuild remembered sets.
 296   if (phase_rebuild_remembered_sets()) return;
 297 
 298   // Phase 5: Wait for Cleanup.
 299   if (phase_delay_to_keep_mmu_before_cleanup()) return;
 300 
 301   // Phase 6: Cleanup pause
 302   if (phase_cleanup()) return;
 303 
 304   // Phase 7: Clear bitmap for next mark.
 305   phase_clear_bitmap_for_next_mark();
 306 }
 307 
     // Epilogue of a concurrent cycle: bump the completed-old-marking-cycle
     // counter (passing whether the cycle finished without aborting), finish
     // cycle bookkeeping in G1ConcurrentMark, and tell the concurrent-GC
     // breakpoint mechanism that this thread went idle.
 308 void G1ConcurrentMarkThread::concurrent_cycle_end() {
 309   // Update the number of full collections that have been
 310   // completed. This will also notify the G1OldGCCount_lock in case a
 311   // Java thread is waiting for a full GC to happen (e.g., it
 312   // called System.gc() with +ExplicitGCInvokesConcurrent).
 313   SuspendibleThreadSetJoiner sts_join;
 314   G1CollectedHeap::heap()->increment_old_marking_cycles_completed(true /* concurrent */,
 315                                                                   !_cm->has_aborted());
 316 
 317   _cm->concurrent_cycle_end();
 318   ConcurrentGCBreakpoints::notify_active_to_idle();
 319 }
< prev index next >