--- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2011-12-12 17:53:02.713816221 -0800
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2011-12-12 17:53:02.493290725 -0800
@@ -1119,10 +1119,8 @@
 // phase.
 int ConcurrentMark::calc_parallel_marking_threads() {
 
-  size_t n_conc_workers;
-  if (!G1CollectedHeap::use_parallel_gc_threads()) {
-    n_conc_workers = 1;
-  } else {
+  size_t n_conc_workers = 0;
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     if (!UseDynamicNumberOfGCThreads ||
         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
          !ForceDynamicNumberOfGCThreads)) {
@@ -1137,9 +1135,9 @@
       // Don't scale down "n_conc_workers" by scale_parallel_threads() because
       // that scaling has already gone into "_max_parallel_marking_threads".
     }
+    assert(n_conc_workers > 0, "Always need at least 1");
   }
-  assert(n_conc_workers > 0, "Always need at least 1");
-  return (int) MAX2(n_conc_workers, (size_t) 1);
+  return (int) MAX2(n_conc_workers, max_parallel_marking_threads());
 }
 
 void ConcurrentMark::markFromRoots() {
@@ -1151,24 +1149,24 @@
   // stop-the-world GC happens even as we mark in this generation.
 
   _restart_for_overflow = false;
-
-  // Parallel task terminator is set in "set_phase()".
   force_overflow_conc()->init();
 
   // _g1h has _n_par_threads
-
   _parallel_marking_threads = calc_parallel_marking_threads();
   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
     "Maximum number of marking threads exceeded");
 
-  _parallel_workers->set_active_workers((int)_parallel_marking_threads);
-  // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
-  // and the decisions on that MT processing is made elsewhere.
-  assert( _parallel_workers->active_workers() > 0, "Should have been set");
-  set_phase(_parallel_workers->active_workers(), true /* concurrent */);
+  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+
+  // Parallel task terminator is set in "set_phase()"
+  set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (parallel_marking_threads() > 0) {
+    _parallel_workers->set_active_workers((int)active_workers);
+    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
+    // and the decisions on that MT processing is made elsewhere.
+    assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
   } else {
     markingTask.work(0);
@@ -1765,8 +1763,8 @@
 
   HeapRegionRemSet::reset_for_cleanup_tasks();
 
-  g1h->set_par_threads();
-  size_t n_workers = g1h->n_par_threads();
+  size_t n_workers;
+
 
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
@@ -1775,9 +1773,11 @@
     assert(g1h->check_heap_region_claim_values(
                                                HeapRegion::InitialClaimValue),
            "sanity check");
-
+
+    g1h->set_par_threads();
+    n_workers = g1h->n_par_threads();
     assert(g1h->n_par_threads() == (int) n_workers,
-           "Should not have been reset");
+      "Should not have been reset");
     g1h->workers()->run_task(&g1_par_count_task);
     // Done with the parallel phase so reset to 0.
     g1h->set_par_threads(0);
@@ -1786,6 +1786,7 @@
                                                HeapRegion::FinalCountClaimValue),
            "sanity check");
   } else {
+    n_workers = 1;
     g1_par_count_task.work(0);
   }
 
@@ -1851,7 +1852,6 @@
                            (note_end_end - note_end_start)*1000.0);
   }
 
-
   // call below, since it affects the metric by which we sort the heap
   // regions.
   if (G1ScrubRemSets) {
@@ -2329,9 +2329,9 @@
     }
   }
 
-  CMRemarkTask(ConcurrentMark* cm) :
+  CMRemarkTask(ConcurrentMark* cm, int active_workers) :
     AbstractGangTask("Par Remark"), _cm(cm) {
-    _cm->terminator()->reset_for_reuse(cm->_g1h->workers()->active_workers());
+    _cm->terminator()->reset_for_reuse(active_workers);
   }
 };
 
@@ -2357,7 +2357,7 @@
     // constructor and pass values of the active workers
     // through the gang in the task.
 
-    CMRemarkTask remarkTask(this);
+    CMRemarkTask remarkTask(this, active_workers);
     g1h->set_par_threads(active_workers);
     g1h->workers()->run_task(&remarkTask);
     g1h->set_par_threads(0);
@@ -2367,7 +2367,7 @@
     int active_workers = 1;
     set_phase(active_workers, false /* concurrent */);
 
-    CMRemarkTask remarkTask(this);
+    CMRemarkTask remarkTask(this, active_workers);
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
@@ -3123,13 +3123,12 @@
   }
 
   double start = os::elapsedTime();
 
-  int n_workers = g1h->workers()->total_workers();
-
   G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
 
   assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
+    int n_workers = g1h->workers()->active_workers();
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&complete_mark_task);
     g1h->set_par_threads(0);