src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp


*** 290,306 ****
  }
  
  void CMSCollector::ref_processor_init() {
    if (_ref_processor == NULL) {
      // Allocate and initialize a reference processor
!     _ref_processor = ReferenceProcessor::create_ref_processor(
!       _span,                               // span
!       _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
!       _cmsGen->refs_discovery_is_mt(),     // mt_discovery
!       &_is_alive_closure,
!       ParallelGCThreads,
!       ParallelRefProcEnabled);
      // Initialize the _ref_processor field of CMSGen
      _cmsGen->set_ref_processor(_ref_processor);
  
      // Allocate a dummy ref processor for perm gen.
      ReferenceProcessor* rp2 = new ReferenceProcessor();
--- 290,308 ----
  }
  
  void CMSCollector::ref_processor_init() {
    if (_ref_processor == NULL) {
      // Allocate and initialize a reference processor
!     _ref_processor =
!       new ReferenceProcessor(_span,                               // span
!                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
!                              ParallelGCThreads,                   // mt processing degree
!                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
!                              MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
!                              _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
!                              &_is_alive_closure,                  // closure for liveness info
!                              false);                              // next field updates do not need write barrier
      // Initialize the _ref_processor field of CMSGen
      _cmsGen->set_ref_processor(_ref_processor);
  
      // Allocate a dummy ref processor for perm gen.
      ReferenceProcessor* rp2 = new ReferenceProcessor();
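With the create_ref_processor() factory gone, the caller now supplies the parallelism arguments directly. A minimal standalone sketch of how the two MT arguments above are computed (hypothetical flag stand-ins with illustrative values, not HotSpot code):

#include <algorithm>
#include <cstdio>

// Hypothetical stand-ins for the HotSpot flags; values are illustrative.
static unsigned ParallelGCThreads      = 4;
static unsigned ConcGCThreads          = 2;
static bool     ParallelRefProcEnabled = false;

int main() {
  // MT processing only when there are parallel GC threads AND the user
  // asked for parallel reference processing.
  bool mt_processing = (ParallelGCThreads > 1) && ParallelRefProcEnabled;
  // Discovery can run concurrently (ConcGCThreads) or in a stop-the-world
  // pause (ParallelGCThreads), so size the discovered lists for the larger.
  unsigned mt_discovery_degree = std::max(ConcGCThreads, ParallelGCThreads);
  std::printf("mt_processing=%d mt_discovery_degree=%u\n",
              (int)mt_processing, mt_discovery_degree);
  return 0;
}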
*** 639,649 ****
      warning("Failed to allocate CMS Revisit Stack");
      return;
    }
  
    // Support for multi-threaded concurrent phases
!   if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) {
      if (FLAG_IS_DEFAULT(ConcGCThreads)) {
        // just for now
        FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
      }
      if (ConcGCThreads > 1) {
--- 641,651 ----
      warning("Failed to allocate CMS Revisit Stack");
      return;
    }
  
    // Support for multi-threaded concurrent phases
!   if (CMSConcurrentMTEnabled) {
      if (FLAG_IS_DEFAULT(ConcGCThreads)) {
        // just for now
        FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
      }
      if (ConcGCThreads > 1) {
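The ergonomic default set here, (ParallelGCThreads + 3)/4, is an integer division that rounds up: roughly a quarter of the parallel GC threads, but never zero when ParallelGCThreads is non-zero. A tiny illustrative sketch:

#include <cstdio>

int main() {
  // (p + 3) / 4 == ceil(p / 4.0) for non-negative integer p.
  for (unsigned p = 1; p <= 8; ++p) {
    std::printf("ParallelGCThreads=%u -> default ConcGCThreads=%u\n",
                p, (p + 3) / 4);
  }
  return 0;
}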
*** 1998,2007 ****
--- 2000,2012 ----
      ReferenceProcessorMTProcMutator z(ref_processor(), false);
      // Temporarily make refs discovery atomic
      ReferenceProcessorAtomicMutator w(ref_processor(), true);
+     // Temporarily make refs discovery single threaded (non-MT)
+     ReferenceProcessorMTDiscoveryMutator v(ref_processor(), false);
+ 
      ref_processor()->set_enqueuing_is_done(false);
      ref_processor()->enable_discovery();
      ref_processor()->setup_policy(clear_all_soft_refs);
      // If an asynchronous collection finishes, the _modUnionTable is
      // all clear.  If we are assuming the collection from an asynchronous
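The ReferenceProcessor*Mutator helpers used above are RAII guards: they flip a setting on construction and restore the saved value on destruction. That is why the new mutator must be bound to a named local (as with z and w); an unnamed temporary would be destroyed, and the setting restored, at the end of the statement. A minimal standalone sketch of the pattern, using a hypothetical MTDiscoveryMutator rather than the HotSpot class:

#include <cassert>

// Minimal stand-in for a reference processor with an MT-discovery flag.
struct RefProc {
  bool mt_discovery = true;
};

// RAII mutator: set the flag on construction, restore it on destruction.
class MTDiscoveryMutator {
  RefProc* _rp;
  bool     _saved;
 public:
  MTDiscoveryMutator(RefProc* rp, bool value)
      : _rp(rp), _saved(rp->mt_discovery) {
    _rp->mt_discovery = value;
  }
  ~MTDiscoveryMutator() { _rp->mt_discovery = _saved; }
};

int main() {
  RefProc rp;
  {
    MTDiscoveryMutator v(&rp, false);  // named local: lives to end of scope
    assert(!rp.mt_discovery);          // discovery is single threaded here
  }
  assert(rp.mt_discovery);             // restored on scope exit

  // Pitfall: an unnamed temporary is destroyed at the end of the full
  // expression, so the saved value is restored immediately.
  MTDiscoveryMutator(&rp, false);
  assert(rp.mt_discovery);             // the change did not stick
  return 0;
}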
*** 4261,4273 ****
    cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
    perm_space->initialize_sequential_subtasks_for_marking(num_workers);
  
    // Refs discovery is already non-atomic.
    assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
!   // Mutate the Refs discovery so it is MT during the
!   // multi-threaded marking phase.
!   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
    DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
    conc_workers()->start_task(&tsk);
    while (tsk.yielded()) {
      tsk.coordinator_yield();
      conc_workers()->continue_task(&tsk);
--- 4266,4276 ----
    cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
    perm_space->initialize_sequential_subtasks_for_marking(num_workers);
  
    // Refs discovery is already non-atomic.
    assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
!   assert(num_workers <= 1 || ref_processor()->discovery_is_mt(), "Discovery should be MT");
    DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
    conc_workers()->start_task(&tsk);
    while (tsk.yielded()) {
      tsk.coordinator_yield();
      conc_workers()->continue_task(&tsk);
*** 5574,5585 ****
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times.  We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    if (n_workers > 1) {
!     // Make refs discovery MT-safe
!     ReferenceProcessorMTMutator mt(ref_processor(), true);
      GenCollectedHeap::StrongRootsScope srs(gch);
      workers->run_task(&tsk);
    } else {
      GenCollectedHeap::StrongRootsScope srs(gch);
      tsk.work(0);
--- 5577,5588 ----
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times.  We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    if (n_workers > 1) {
!     // Make refs discovery MT-safe, if it isn't already
!     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
      GenCollectedHeap::StrongRootsScope srs(gch);
      workers->run_task(&tsk);
    } else {
      GenCollectedHeap::StrongRootsScope srs(gch);
      tsk.work(0);
*** 5701,5717 ****
--- 5704,5725 ----
                        CMSCollector*    collector,
                        const MemRegion& span,
                        CMSBitMap*       mark_bit_map,
                        AbstractWorkGang* workers,
                        OopTaskQueueSet* task_queues):
+     // XXX Should superclass AGTWOQ also know about AWG since it knows
+     // about the task_queues used by the AWG? Then it could initialize
+     // the terminator() object. See 6984287. The set_for_termination()
+     // below is a temporary band-aid for the regression in 6984287.
      AbstractGangTaskWOopQueues("Process referents by policy in parallel",
        task_queues),
      _task(task),
      _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
    {
      assert(_collector->_span.equals(_span) && !_span.is_empty(),
             "Inconsistency in _span");
+     set_for_termination(workers->active_workers());
    }
  
    OopTaskQueueSet* task_queues() { return queues(); }
  
    OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
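The XXX comment records a layering question: the AbstractGangTaskWOopQueues superclass knows the task queues but not the work gang, so it cannot size its terminator itself; set_for_termination() is the band-aid that forwards the active worker count from the subclass, which does see the gang. A minimal sketch of that shape (hypothetical types, not the HotSpot classes):

struct WorkGang {
  int active_workers() const { return 4; }  // illustrative fixed count
};

// Base class: knows the queues, but not the gang, so it cannot
// initialize its termination protocol on its own.
struct GangTaskWithQueues {
  int _termination_workers = 0;
  void set_for_termination(int n_workers) { _termination_workers = n_workers; }
};

// Subclass: sees the gang, so it forwards the active worker count.
struct RefProcTaskProxy : GangTaskWithQueues {
  explicit RefProcTaskProxy(WorkGang* workers) {
    set_for_termination(workers->active_workers());
  }
};

int main() {
  WorkGang gang;
  RefProcTaskProxy task(&gang);
  return task._termination_workers == gang.active_workers() ? 0 : 1;
}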
*** 5870,5881 ****
    // may have been a different number of threads doing the discovery
    // and a different number of discovered lists may have Ref objects.
    // That is OK as long as the Reference lists are balanced (see
    // balance_all_queues() and balance_queues()).
! 
!   rp->set_mt_degree(ParallelGCThreads);
    CMSRefProcTaskExecutor task_executor(*this);
    rp->process_discovered_references(&_is_alive_closure,
                                      &cmsKeepAliveClosure,
                                      &cmsDrainMarkingStackClosure,
                                      &task_executor);
  
--- 5878,5888 ----
    // may have been a different number of threads doing the discovery
    // and a different number of discovered lists may have Ref objects.
    // That is OK as long as the Reference lists are balanced (see
    // balance_all_queues() and balance_queues()).
!   rp->set_active_mt_degree(ParallelGCThreads);
    CMSRefProcTaskExecutor task_executor(*this);
    rp->process_discovered_references(&_is_alive_closure,
                                      &cmsKeepAliveClosure,
                                      &cmsDrainMarkingStackClosure,
                                      &task_executor);
  
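set_active_mt_degree() tells the reference processor how many threads will do the processing, which can differ from the degree used during discovery; as the comment above notes, this is safe as long as the discovered entries are first balanced across that degree. A standalone sketch of such balancing (hypothetical, not the HotSpot balance_queues() implementation):

#include <cstdio>
#include <vector>

int main() {
  // Discovery may have used more (or fewer) lists than there are
  // processing threads; rebalance the total across the active degree.
  std::vector<int> discovered = {9, 0, 3, 0, 0, 0};  // 6 discovery lists
  const int active_degree = 4;                       // e.g. ParallelGCThreads
  int total = 0;
  for (int n : discovered) total += n;
  std::vector<int> balanced(active_degree, total / active_degree);
  for (int i = 0; i < total % active_degree; ++i) balanced[i]++;  // remainder
  for (int i = 0; i < active_degree; ++i)
    std::printf("worker %d gets %d refs\n", i, balanced[i]);
  return 0;
}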