src/share/vm/gc_implementation/g1/concurrentMark.cpp

Print this page
rev 2891 : 7120038: G1: ParallelGCThreads==0 is broken
Summary: Running G1 with ParallelGCThreads==0 results in various crashes and asserts. Most of these are caused by unguarded references to the worker threads array or an incorrect number of active workers.
Reviewed-by:

*** 1117,1130 **** // Calculates the number of active workers for a concurrent // phase. int ConcurrentMark::calc_parallel_marking_threads() { ! size_t n_conc_workers; ! if (!G1CollectedHeap::use_parallel_gc_threads()) { ! n_conc_workers = 1; ! } else { if (!UseDynamicNumberOfGCThreads || (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) { n_conc_workers = max_parallel_marking_threads(); } else { --- 1117,1128 ---- // Calculates the number of active workers for a concurrent // phase. int ConcurrentMark::calc_parallel_marking_threads() { ! size_t n_conc_workers = 0; ! if (G1CollectedHeap::use_parallel_gc_threads()) { if (!UseDynamicNumberOfGCThreads || (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) { n_conc_workers = max_parallel_marking_threads(); } else {
*** 1135,1147 **** parallel_marking_threads(), Threads::number_of_non_daemon_threads()); // Don't scale down "n_conc_workers" by scale_parallel_threads() because // that scaling has already gone into "_max_parallel_marking_threads". } - } assert(n_conc_workers > 0, "Always need at least 1"); ! return (int) MAX2(n_conc_workers, (size_t) 1); } void ConcurrentMark::markFromRoots() { // we might be tempted to assert that: // assert(asynch == !SafepointSynchronize::is_at_safepoint(), --- 1133,1145 ---- parallel_marking_threads(), Threads::number_of_non_daemon_threads()); // Don't scale down "n_conc_workers" by scale_parallel_threads() because // that scaling has already gone into "_max_parallel_marking_threads". } assert(n_conc_workers > 0, "Always need at least 1"); ! } ! return (int) MAX2(n_conc_workers, max_parallel_marking_threads()); } void ConcurrentMark::markFromRoots() { // we might be tempted to assert that: // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
*** 1149,1176 **** // However that wouldn't be right, because it's possible that // a safepoint is indeed in progress as a younger generation // stop-the-world GC happens even as we mark in this generation. _restart_for_overflow = false; - - // Parallel task terminator is set in "set_phase()". force_overflow_conc()->init(); // _g1h has _n_par_threads - _parallel_marking_threads = calc_parallel_marking_threads(); assert(parallel_marking_threads() <= max_parallel_marking_threads(), "Maximum number of marking threads exceeded"); - _parallel_workers->set_active_workers((int)_parallel_marking_threads); - // Don't set _n_par_threads because it affects MT in process_strong_roots() - // and the decisions on that MT processing are made elsewhere. ! assert( _parallel_workers->active_workers() > 0, "Should have been set"); ! set_phase(_parallel_workers->active_workers(), true /* concurrent */); CMConcurrentMarkingTask markingTask(this, cmThread()); if (parallel_marking_threads() > 0) { _parallel_workers->run_task(&markingTask); } else { markingTask.work(0); } print_stats(); --- 1147,1174 ---- // However that wouldn't be right, because it's possible that // a safepoint is indeed in progress as a younger generation // stop-the-world GC happens even as we mark in this generation. _restart_for_overflow = false; force_overflow_conc()->init(); // _g1h has _n_par_threads _parallel_marking_threads = calc_parallel_marking_threads(); assert(parallel_marking_threads() <= max_parallel_marking_threads(), "Maximum number of marking threads exceeded"); ! size_t active_workers = MAX2((size_t) 1, parallel_marking_threads()); ! ! // Parallel task terminator is set in "set_phase()" ! 
set_phase(active_workers, true /* concurrent */); CMConcurrentMarkingTask markingTask(this, cmThread()); if (parallel_marking_threads() > 0) { + _parallel_workers->set_active_workers((int)active_workers); + // Don't set _n_par_threads because it affects MT in process_strong_roots() + // and the decisions on that MT processing are made elsewhere. + assert(_parallel_workers->active_workers() > 0, "Should have been set"); _parallel_workers->run_task(&markingTask); } else { markingTask.work(0); } print_stats();
*** 1763,1793 **** double start = os::elapsedTime(); HeapRegionRemSet::reset_for_cleanup_tasks(); ! g1h->set_par_threads(); ! size_t n_workers = g1h->n_par_threads(); // Do counting once more with the world stopped for good measure. G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), &_region_bm, &_card_bm); if (G1CollectedHeap::use_parallel_gc_threads()) { assert(g1h->check_heap_region_claim_values( HeapRegion::InitialClaimValue), "sanity check"); assert(g1h->n_par_threads() == (int) n_workers, "Should not have been reset"); g1h->workers()->run_task(&g1_par_count_task); // Done with the parallel phase so reset to 0. g1h->set_par_threads(0); assert(g1h->check_heap_region_claim_values( HeapRegion::FinalCountClaimValue), "sanity check"); } else { g1_par_count_task.work(0); } size_t known_garbage_bytes = g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes(); --- 1761,1794 ---- double start = os::elapsedTime(); HeapRegionRemSet::reset_for_cleanup_tasks(); ! size_t n_workers; ! // Do counting once more with the world stopped for good measure. G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), &_region_bm, &_card_bm); if (G1CollectedHeap::use_parallel_gc_threads()) { assert(g1h->check_heap_region_claim_values( HeapRegion::InitialClaimValue), "sanity check"); + g1h->set_par_threads(); + n_workers = g1h->n_par_threads(); assert(g1h->n_par_threads() == (int) n_workers, "Should not have been reset"); g1h->workers()->run_task(&g1_par_count_task); // Done with the parallel phase so reset to 0. g1h->set_par_threads(0); assert(g1h->check_heap_region_claim_values( HeapRegion::FinalCountClaimValue), "sanity check"); } else { + n_workers = 1; g1_par_count_task.work(0); } size_t known_garbage_bytes = g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
*** 1849,1859 **** if (G1PrintParCleanupStats) { gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", (note_end_end - note_end_start)*1000.0); } - // call below, since it affects the metric by which we sort the heap // regions. if (G1ScrubRemSets) { double rs_scrub_start = os::elapsedTime(); G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); --- 1850,1859 ----
*** 2327,2339 **** // want to abort remark and do concurrent marking again. task->record_end_time(); } } ! CMRemarkTask(ConcurrentMark* cm) : AbstractGangTask("Par Remark"), _cm(cm) { ! _cm->terminator()->reset_for_reuse(cm->_g1h->workers()->active_workers()); } }; void ConcurrentMark::checkpointRootsFinalWork() { ResourceMark rm; --- 2327,2339 ---- // want to abort remark and do concurrent marking again. task->record_end_time(); } } ! CMRemarkTask(ConcurrentMark* cm, int active_workers) : AbstractGangTask("Par Remark"), _cm(cm) { ! _cm->terminator()->reset_for_reuse(active_workers); } }; void ConcurrentMark::checkpointRootsFinalWork() { ResourceMark rm;
*** 2355,2375 **** // Leave _parallel_marking_threads at its // value originally calculated in the ConcurrentMark // constructor and pass values of the active workers // through the gang in the task. ! CMRemarkTask remarkTask(this); g1h->set_par_threads(active_workers); g1h->workers()->run_task(&remarkTask); g1h->set_par_threads(0); } else { G1CollectedHeap::StrongRootsScope srs(g1h); // this is remark, so we'll use up all available threads int active_workers = 1; set_phase(active_workers, false /* concurrent */); ! CMRemarkTask remarkTask(this); // We will start all available threads, even if we decide that the // active_workers will be fewer. The extra ones will just bail out // immediately. remarkTask.work(0); } --- 2355,2375 ---- // Leave _parallel_marking_threads at its // value originally calculated in the ConcurrentMark // constructor and pass values of the active workers // through the gang in the task. ! CMRemarkTask remarkTask(this, active_workers); g1h->set_par_threads(active_workers); g1h->workers()->run_task(&remarkTask); g1h->set_par_threads(0); } else { G1CollectedHeap::StrongRootsScope srs(g1h); // this is remark, so we'll use up all available threads int active_workers = 1; set_phase(active_workers, false /* concurrent */); ! CMRemarkTask remarkTask(this, active_workers); // We will start all available threads, even if we decide that the // active_workers will be fewer. The extra ones will just bail out // immediately. remarkTask.work(0); }
*** 3121,3137 **** g1h->g1_policy()->record_mark_closure_time(0.0); return; } double start = os::elapsedTime(); - int n_workers = g1h->workers()->total_workers(); - G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this); assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity"); if (G1CollectedHeap::use_parallel_gc_threads()) { g1h->set_par_threads(n_workers); g1h->workers()->run_task(&complete_mark_task); g1h->set_par_threads(0); } else { complete_mark_task.work(0); --- 3121,3136 ---- g1h->g1_policy()->record_mark_closure_time(0.0); return; } double start = os::elapsedTime(); G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this); assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity"); if (G1CollectedHeap::use_parallel_gc_threads()) { + int n_workers = g1h->workers()->active_workers(); g1h->set_par_threads(n_workers); g1h->workers()->run_task(&complete_mark_task); g1h->set_par_threads(0); } else { complete_mark_task.work(0);