
src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 7184 : 6979279
rev 7185 : [mq]: rev1


Before the change:

 643   assert(ConcGCThreads > 0, "Should have been set");
 644   _parallel_marking_threads = (uint) ConcGCThreads;
 645   _max_parallel_marking_threads = _parallel_marking_threads;
 646 
 647   if (parallel_marking_threads() > 1) {
 648     _cleanup_task_overhead = 1.0;
 649   } else {
 650     _cleanup_task_overhead = marking_task_overhead();
 651   }
 652   _cleanup_sleep_factor =
 653                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 654 
 655 #if 0
 656   gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 657   gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 658   gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 659   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 660   gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 661 #endif
 662 
 663   guarantee(parallel_marking_threads() > 0, "peace of mind");
 664   _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
 665        _max_parallel_marking_threads, false, true);
 666   if (_parallel_workers == NULL) {
 667     vm_exit_during_initialization("Failed necessary allocation.");
 668   } else {
 669     _parallel_workers->initialize_workers();
 670   }
 671 
 672   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 673     uintx mark_stack_size =
 674       MIN2(MarkStackSizeMax,
 675           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 676     // Verify that the calculated value for MarkStackSize is in range.
 677     // It would be nice to use the private utility routine from Arguments.
 678     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 679       warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
 680               "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 681               mark_stack_size, (uintx) 1, MarkStackSizeMax);
 682       return;
 683     }
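
Note: the MIN2/MAX2 expression above sizes the global mark stack so that it
scales with the number of marking threads, then clamps the result to
MarkStackSizeMax. A standalone sketch of the same clamp (plain C++; the
constants here are illustrative stand-ins, not HotSpot's actual defaults):

  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  typedef size_t uintx;  // stand-in for HotSpot's uintx

  int main() {
    const uintx TASKQUEUE_SIZE   = 16384;      // illustrative; platform-defined in HotSpot
    const uintx MarkStackSize    = 32 * 1024;  // illustrative default
    const uintx MarkStackSizeMax = 512 * 1024; // illustrative maximum
    uintx parallel_marking_threads = 4;

    // Same shape as MIN2(MarkStackSizeMax, MAX2(MarkStackSize, threads * TASKQUEUE_SIZE)):
    // grow with the thread count, never below the default, never above the max.
    uintx mark_stack_size =
      std::min(MarkStackSizeMax,
               std::max(MarkStackSize,
                        (uintx)(parallel_marking_threads * TASKQUEUE_SIZE)));
    printf("mark_stack_size = %zu entries\n", mark_stack_size);  // 65536
    return 0;
  }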


1208       _cm->scanRootRegion(hr, worker_id);
1209       hr = root_regions->claim_next();
1210     }
1211   }
1212 };
1213 
1214 void ConcurrentMark::scanRootRegions() {
1215   // Start of concurrent marking.
1216   ClassLoaderDataGraph::clear_claimed_marks();
1217 
1218   // scan_in_progress() will have been set to true only if there was
1219   // at least one root region to scan. So, if it's false, we
1220   // should not attempt to do any further work.
1221   if (root_regions()->scan_in_progress()) {
1222     _parallel_marking_threads = calc_parallel_marking_threads();
1223     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1224            "Maximum number of marking threads exceeded");
1225     uint active_workers = MAX2(1U, parallel_marking_threads());
1226 
1227     CMRootRegionScanTask task(this);
1228     if (use_parallel_marking_threads()) {
1229       _parallel_workers->set_active_workers((int) active_workers);
1230       _parallel_workers->run_task(&task);
1231     } else {
1232       task.work(0);
1233     }
1234 
1235     // It's possible that has_aborted() is true here without actually
1236     // aborting the survivor scan earlier. This is OK as it's
1237     // mainly used for sanity checking.
1238     root_regions()->scan_finished();
1239   }
1240 }
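
Note: scanRootRegions() distributes work by having each worker repeatedly
claim the next unscanned region (the claim_next() loop at lines 1208-1210
above). A minimal sketch of that atomic-claim idiom in standalone C++
(types and names are illustrative, not the HotSpot ones):

  #include <atomic>
  #include <cstdio>

  // Workers race to claim the next region index with an atomic fetch-add;
  // claim_next() returns NULL once every region has been handed out, so
  // each region is scanned exactly once no matter how many workers run.
  struct RootRegionList {
    static const int kNumRegions = 8;
    int _regions[kNumRegions];
    std::atomic<int> _next;
    RootRegionList() : _next(0) {
      for (int i = 0; i < kNumRegions; i++) _regions[i] = i;
    }
    int* claim_next() {
      int i = _next.fetch_add(1, std::memory_order_relaxed);
      return (i < kNumRegions) ? &_regions[i] : NULL;
    }
  };

  void worker(RootRegionList* list, unsigned worker_id) {
    for (int* hr = list->claim_next(); hr != NULL; hr = list->claim_next()) {
      printf("worker %u scans region %d\n", worker_id, *hr);
    }
  }

  int main() {
    RootRegionList list;
    worker(&list, 0);  // in G1 a work gang runs this loop from many threads
    return 0;
  }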
1241 
1242 void ConcurrentMark::markFromRoots() {
1243   // We might be tempted to assert that:
1244   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1245   //        "inconsistent argument?");
1246   // However, that wouldn't be right, because it's possible that
1247   // a safepoint is indeed in progress: a younger generation
1248   // stop-the-world GC can happen even as we mark in this generation.
1249 
1250   _restart_for_overflow = false;
1251   force_overflow_conc()->init();
1252 
1253   // _g1h has _n_par_threads
1254   _parallel_marking_threads = calc_parallel_marking_threads();
1255   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1256     "Maximum number of marking threads exceeded");
1257 
1258   uint active_workers = MAX2(1U, parallel_marking_threads());
1259 
1260   // Parallel task terminator is set in "set_concurrency_and_phase()"
1261   set_concurrency_and_phase(active_workers, true /* concurrent */);
1262 
1263   CMConcurrentMarkingTask markingTask(this, cmThread());
1264   if (use_parallel_marking_threads()) {
1265     _parallel_workers->set_active_workers((int)active_workers);
1266     // Don't set _n_par_threads because it affects MT in process_roots()
1267     // and the decisions on that MT processing are made elsewhere.
1268     assert(_parallel_workers->active_workers() > 0, "Should have been set");
1269     _parallel_workers->run_task(&markingTask);
1270   } else {
1271     markingTask.work(0);
1272   }
1273   print_stats();
1274 }
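
Note: markFromRoots() shows the dispatch shape this change targets: run the
task on the work gang when parallel marking threads are in use, otherwise
call work(0) inline on the current thread. A standalone sketch of that
pattern using std::thread (illustrative; not the FlexibleWorkGang
implementation):

  #include <cstdio>
  #include <thread>
  #include <vector>

  // Stand-in for an AbstractGangTask: work(id) runs once per active worker.
  struct MarkingTask {
    void work(unsigned worker_id) { printf("marking on worker %u\n", worker_id); }
  };

  static void run_task(MarkingTask* task, unsigned active_workers, bool use_gang) {
    if (use_gang) {
      std::vector<std::thread> gang;
      for (unsigned i = 0; i < active_workers; i++) {
        gang.emplace_back([task, i] { task->work(i); });  // one worker per thread
      }
      for (size_t i = 0; i < gang.size(); i++) gang[i].join();
    } else {
      task->work(0);  // serial fallback on the calling thread
    }
  }

  int main() {
    MarkingTask task;
    run_task(&task, 4, true);
    return 0;
  }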
1275 
1276 // Helper class to get rid of some boilerplate code.
1277 class G1CMTraceTime : public GCTraceTime {
1278   static bool doit_and_prepend(bool doit) {
1279     if (doit) {
1280       gclog_or_tty->put(' ');
1281     }
1282     return doit;
1283   }
1284 
1285  public:
1286   G1CMTraceTime(const char* title, bool doit)
1287     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
1288         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
1289   }
1290 };
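
Note: this helper is meant to be used as a scoped timer around an
optionally-logged phase; doit_and_prepend() emits a leading space before
GCTraceTime prints anything, so the line aligns with the surrounding GC log
output. A hypothetical call site (the title and log-level check are
illustrative):

  {
    G1CMTraceTime trace("Finalize Marking", G1Log::finer());
    // ... timed work; when `doit` was true, the GCTraceTime destructor
    //     logs the phase title and elapsed time on scope exit ...
  }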
1291 
1292 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {


3324                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3325                           (double)_cleanup_times.num()
3326                          : 0.0));
3327   if (G1ScrubRemSets) {
3328     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3329                            _total_rs_scrub_time,
3330                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3331                             (double)_cleanup_times.num()
3332                            : 0.0));
3333   }
3334   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3335                          (_init_times.sum() + _remark_times.sum() +
3336                           _cleanup_times.sum())/1000.0);
3337   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3338                 "(%8.2f s marking).",
3339                 cmThread()->vtime_accum(),
3340                 cmThread()->vtime_mark_accum());
3341 }
3342 
3343 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3344   if (use_parallel_marking_threads()) {
3345     _parallel_workers->print_worker_threads_on(st);
3346   }
3347 }
3348 
3349 void ConcurrentMark::print_on_error(outputStream* st) const {
3350   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3351       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3352   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3353   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3354 }
3355 
3356 // We take a break if someone is trying to stop the world.
3357 bool ConcurrentMark::do_yield_check(uint worker_id) {
3358   if (SuspendibleThreadSet::should_yield()) {
3359     if (worker_id == 0) {
3360       _g1h->g1_policy()->record_concurrent_pause();
3361     }
3362     SuspendibleThreadSet::yield();
3363     return true;
3364   } else {
3365     return false;
3366   }




After the change:

 643   assert(ConcGCThreads > 0, "Should have been set");
 644   _parallel_marking_threads = (uint) ConcGCThreads;
 645   _max_parallel_marking_threads = _parallel_marking_threads;
 646 
 647   if (parallel_marking_threads() > 1) {
 648     _cleanup_task_overhead = 1.0;
 649   } else {
 650     _cleanup_task_overhead = marking_task_overhead();
 651   }
 652   _cleanup_sleep_factor =
 653                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 654 
 655 #if 0
 656   gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 657   gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 658   gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 659   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 660   gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 661 #endif
 662 

 663   _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
 664        _max_parallel_marking_threads, false, true);
 665   if (_parallel_workers == NULL) {
 666     vm_exit_during_initialization("Failed necessary allocation.");
 667   } else {
 668     _parallel_workers->initialize_workers();
 669   }
 670 
 671   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 672     uintx mark_stack_size =
 673       MIN2(MarkStackSizeMax,
 674           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 675     // Verify that the calculated value for MarkStackSize is in range.
 676     // It would be nice to use the private utility routine from Arguments.
 677     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 678       warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
 679               "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 680               mark_stack_size, (uintx) 1, MarkStackSizeMax);
 681       return;
 682     }
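
Note: the sleep factor computed at lines 652-653 above turns a target CPU
overhead into a work/sleep ratio: a task allowed to consume fraction o of a
CPU sleeps for work_time * (1 - o) / o after each work slice, giving a duty
cycle of exactly o (and a factor of 0, i.e. no sleeping, when o is 1.0). A
standalone sketch with illustrative values:

  #include <cassert>
  #include <cstdio>

  // After working for t, sleep for t * sleep_factor(o), so that
  // t / (t + t * sleep_factor(o)) == o.
  static double sleep_factor(double overhead) {
    assert(overhead > 0.0 && overhead <= 1.0);
    return (1.0 - overhead) / overhead;
  }

  int main() {
    double overhead = 0.25;  // task may use 25% of a CPU
    double work_ms  = 10.0;  // one work slice
    double sleep_ms = work_ms * sleep_factor(overhead);  // 30 ms
    printf("duty cycle = %.2f\n", work_ms / (work_ms + sleep_ms));  // 0.25
    return 0;
  }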


1207       _cm->scanRootRegion(hr, worker_id);
1208       hr = root_regions->claim_next();
1209     }
1210   }
1211 };
1212 
1213 void ConcurrentMark::scanRootRegions() {
1214   // Start of concurrent marking.
1215   ClassLoaderDataGraph::clear_claimed_marks();
1216 
1217   // scan_in_progress() will have been set to true only if there was
1218   // at least one root region to scan. So, if it's false, we
1219   // should not attempt to do any further work.
1220   if (root_regions()->scan_in_progress()) {
1221     _parallel_marking_threads = calc_parallel_marking_threads();
1222     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1223            "Maximum number of marking threads exceeded");
1224     uint active_workers = MAX2(1U, parallel_marking_threads());
1225 
1226     CMRootRegionScanTask task(this);
1227     _parallel_workers->set_active_workers(active_workers);

1228     _parallel_workers->run_task(&task);



1229 
1230     // It's possible that has_aborted() is true here without actually
1231     // aborting the survivor scan earlier. This is OK as it's
1232     // mainly used for sanity checking.
1233     root_regions()->scan_finished();
1234   }
1235 }
1236 
1237 void ConcurrentMark::markFromRoots() {
1238   // We might be tempted to assert that:
1239   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1240   //        "inconsistent argument?");
1241   // However, that wouldn't be right, because it's possible that
1242   // a safepoint is indeed in progress: a younger generation
1243   // stop-the-world GC can happen even as we mark in this generation.
1244 
1245   _restart_for_overflow = false;
1246   force_overflow_conc()->init();
1247 
1248   // _g1h has _n_par_threads
1249   _parallel_marking_threads = calc_parallel_marking_threads();
1250   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1251     "Maximum number of marking threads exceeded");
1252 
1253   uint active_workers = MAX2(1U, parallel_marking_threads());
1254 
1255   // Parallel task terminator is set in "set_concurrency_and_phase()"
1256   set_concurrency_and_phase(active_workers, true /* concurrent */);
1257 
1258   CMConcurrentMarkingTask markingTask(this, cmThread());
1259   _parallel_workers->set_active_workers(active_workers);

1260   // Don't set _n_par_threads because it affects MT in process_roots()
1261   // and the decisions on that MT processing are made elsewhere.
1262   assert(_parallel_workers->active_workers() > 0, "Should have been set");
1263   _parallel_workers->run_task(&markingTask);



1264   print_stats();
1265 }
1266 
1267 // Helper class to get rid of some boilerplate code.
1268 class G1CMTraceTime : public GCTraceTime {
1269   static bool doit_and_prepend(bool doit) {
1270     if (doit) {
1271       gclog_or_tty->put(' ');
1272     }
1273     return doit;
1274   }
1275 
1276  public:
1277   G1CMTraceTime(const char* title, bool doit)
1278     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
1279         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
1280   }
1281 };
1282 
1283 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {


3315                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3316                           (double)_cleanup_times.num()
3317                          : 0.0));
3318   if (G1ScrubRemSets) {
3319     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3320                            _total_rs_scrub_time,
3321                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3322                             (double)_cleanup_times.num()
3323                            : 0.0));
3324   }
3325   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3326                          (_init_times.sum() + _remark_times.sum() +
3327                           _cleanup_times.sum())/1000.0);
3328   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3329                 "(%8.2f s marking).",
3330                 cmThread()->vtime_accum(),
3331                 cmThread()->vtime_mark_accum());
3332 }
3333 
3334 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {

3335   _parallel_workers->print_worker_threads_on(st);

3336 }
3337 
3338 void ConcurrentMark::print_on_error(outputStream* st) const {
3339   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3340       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3341   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3342   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3343 }
3344 
3345 // We take a break if someone is trying to stop the world.
3346 bool ConcurrentMark::do_yield_check(uint worker_id) {
3347   if (SuspendibleThreadSet::should_yield()) {
3348     if (worker_id == 0) {
3349       _g1h->g1_policy()->record_concurrent_pause();
3350     }
3351     SuspendibleThreadSet::yield();
3352     return true;
3353   } else {
3354     return false;
3355   }
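
Note: do_yield_check() is the cooperative half of the suspendible-thread
protocol: marking loops poll it periodically and, when a safepoint is
pending, block in SuspendibleThreadSet::yield() until the stop-the-world
pause finishes. A sketch of the calling pattern (the loop and helper names
are illustrative):

  // Inside a marking worker's main loop:
  while (has_more_work()) {
    process_a_few_entries();
    if (_cm->do_yield_check(worker_id)) {
      // We yielded for a stop-the-world pause; global marking state may
      // have changed while this thread was blocked, so re-sample any
      // cached state before continuing.
      refresh_cached_marking_state();
    }
  }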

