
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 13328 : [mq]: webrev.0b
rev 13329 : [mq]: webrev.1
rev 13330 : imported patch webrev.2
rev 13331 : imported patch webrev.3b
rev 13332 : [mq]: webrev.4

*** 1258,1270 ****
        }
        assert(num_free_regions() == 0, "we should not have added any free regions");
        rebuild_region_sets(false /* free_list_only */);
  
        // Enqueue any discovered reference objects that have
        // not been removed from the discovered lists.
!       ref_processor_stw()->enqueue_discovered_references();
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
        DerivedPointerTable::update_pointers();
  #endif
--- 1258,1274 ----
        }
        assert(num_free_regions() == 0, "we should not have added any free regions");
        rebuild_region_sets(false /* free_list_only */);
  
+       ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q());
+ 
        // Enqueue any discovered reference objects that have
        // not been removed from the discovered lists.
!       ref_processor_stw()->enqueue_discovered_references(NULL, &pt);
! 
!       pt.print_enqueue_phase();
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
        DerivedPointerTable::update_pointers();
  #endif
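Note: the hunk above threads a ReferenceProcessorPhaseTimes accumulator through the full-GC enqueue step instead of relying on a bare GC timer. Below is a minimal standalone sketch of that pattern; the simplified types (PhaseTimes, RefProcessor) are hypothetical stand-ins, not HotSpot code.

    #include <chrono>
    #include <cstdio>

    // Stand-in for ReferenceProcessorPhaseTimes: accumulates per-phase durations.
    struct PhaseTimes {
      double enqueue_ms = 0.0;
      void print_enqueue_phase() const {
        std::printf("Reference Enqueuing: %.3fms\n", enqueue_ms);
      }
    };

    // Stand-in for ReferenceProcessor: times its enqueue work into the accumulator.
    struct RefProcessor {
      void enqueue_discovered_references(void* executor, PhaseTimes* pt) {
        auto start = std::chrono::steady_clock::now();
        // ... move discovered references onto the pending list ...
        auto end = std::chrono::steady_clock::now();
        pt->enqueue_ms +=
            std::chrono::duration<double, std::milli>(end - start).count();
        (void)executor;  // NULL selects the serial path, as in the full-GC hunk
      }
    };

    int main() {
      RefProcessor rp;
      PhaseTimes pt;                                   // one accumulator per pause
      rp.enqueue_discovered_references(nullptr, &pt);  // serial enqueue
      pt.print_enqueue_phase();                        // log the collected timing
      return 0;
    }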
*** 1665,1675 ****
  // Public methods.
  
  G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
    CollectedHeap(),
    _collector_policy(collector_policy),
!   _g1_policy(create_g1_policy()),
    _collection_set(this, _g1_policy),
    _dirty_card_queue_set(false),
    _is_alive_closure_cm(this),
    _is_alive_closure_stw(this),
    _ref_processor_cm(NULL),
--- 1669,1681 ----
  // Public methods.
  
  G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
    CollectedHeap(),
    _collector_policy(collector_policy),
!   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
!   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
!   _g1_policy(create_g1_policy(_gc_timer_stw)),
    _collection_set(this, _g1_policy),
    _dirty_card_queue_set(false),
    _is_alive_closure_cm(this),
    _is_alive_closure_stw(this),
    _ref_processor_cm(NULL),
*** 1692,1704 ****
    _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
    _old_evac_stats("Old", OldPLABSize, PLABWeight),
    _expand_heap_after_alloc_failure(true),
    _old_marking_cycles_started(0),
    _old_marking_cycles_completed(0),
!   _in_cset_fast_test(),
!   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
!   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
  
    _workers = new WorkGang("GC Thread",  ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
--- 1698,1708 ----
    _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
    _old_evac_stats("Old", OldPLABSize, PLABWeight),
    _expand_heap_after_alloc_failure(true),
    _old_marking_cycles_started(0),
    _old_marking_cycles_completed(0),
!   _in_cset_fast_test() {
  
    _workers = new WorkGang("GC Thread",  ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
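Note: the two constructor hunks above move the _gc_timer_stw and _gc_tracer_stw initializers from the tail of the mem-initializer list to a position before _g1_policy, because _g1_policy now consumes the timer via create_g1_policy(_gc_timer_stw). This matters because C++ initializes members in declaration order, so a member used by a later member's initializer must be declared and listed first. A minimal standalone sketch with hypothetical names:

    #include <cassert>

    struct Timer  {};
    struct Policy { Timer* timer; };

    struct Heap {
      // _timer is declared (and therefore initialized) before _policy, so it
      // is safe for _policy's initializer to consume it; C++ runs member
      // initializers in declaration order, not initializer-list order.
      Timer*  _timer;
      Policy* _policy;

      Heap() :
        _timer(new Timer()),
        _policy(new Policy{_timer}) {  // _timer is already valid here
        assert(_policy->timer == _timer);
      }
    };

    int main() {
      Heap heap;
      return 0;
    }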
*** 2013,2026 ****
    //     * Discovery is atomic - i.e. not concurrent.
    //     * Reference discovery will not need a barrier.
  
    MemRegion mr = reserved_region();
  
    // Concurrent Mark ref processor
    _ref_processor_cm =
      new ReferenceProcessor(mr,    // span
!                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                                                  // mt processing
                             ParallelGCThreads,
                                                                  // degree of mt processing
                             (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                                                  // mt discovery
--- 2017,2032 ----
    //     * Discovery is atomic - i.e. not concurrent.
    //     * Reference discovery will not need a barrier.
  
    MemRegion mr = reserved_region();
  
+   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
+ 
    // Concurrent Mark ref processor
    _ref_processor_cm =
      new ReferenceProcessor(mr,    // span
!                            mt_processing,
                                                                  // mt processing
                             ParallelGCThreads,
                                                                  // degree of mt processing
                             (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                                                  // mt discovery
*** 2033,2043 ****
                                                                  // (for efficiency/performance)
  
    // STW ref processor
    _ref_processor_stw =
      new ReferenceProcessor(mr,    // span
!                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                                                  // mt processing
                             ParallelGCThreads,
                                                                  // degree of mt processing
                             (ParallelGCThreads > 1),
                                                                  // mt discovery
--- 2039,2049 ----
                                                                  // (for efficiency/performance)
  
    // STW ref processor
    _ref_processor_stw =
      new ReferenceProcessor(mr,    // span
!                            mt_processing,
                                                                  // mt processing
                             ParallelGCThreads,
                                                                  // degree of mt processing
                             (ParallelGCThreads > 1),
                                                                  // mt discovery
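Note: the two hunks above previously duplicated the expression ParallelRefProcEnabled && (ParallelGCThreads > 1) at both ReferenceProcessor construction sites; the patch hoists it into a single mt_processing local so the concurrent-mark and STW processors are guaranteed to make the same serial-vs-multi-threaded choice. A standalone sketch, with stand-in variables for the real VM flags of the same names:

    #include <cstdio>

    // Stand-ins for the real HotSpot flags of the same names.
    static bool     ParallelRefProcEnabled = true;
    static unsigned ParallelGCThreads      = 4;

    int main() {
      // Computed once, consumed by both construction sites, so the decision
      // cannot drift if the expression is ever edited in only one place.
      bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
      std::printf("CM  ref processor: mt processing = %s\n", mt_processing ? "true" : "false");
      std::printf("STW ref processor: mt processing = %s\n", mt_processing ? "true" : "false");
      return 0;
    }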
*** 4311,4328 ****
    G1STWDrainQueueClosure drain_queue(this, pss);
  
    // Setup the soft refs policy...
    rp->setup_policy(false);
  
    ReferenceProcessorStats stats;
    if (!rp->processing_is_mt()) {
      // Serial reference processing...
      stats = rp->process_discovered_references(&is_alive,
                                                &keep_alive,
                                                &drain_queue,
                                                NULL,
!                                               _gc_timer_stw);
    } else {
      uint no_of_gc_workers = workers()->active_workers();
  
      // Parallel reference processing
      assert(no_of_gc_workers <= rp->max_num_q(),
--- 4317,4336 ----
    G1STWDrainQueueClosure drain_queue(this, pss);
  
    // Setup the soft refs policy...
    rp->setup_policy(false);
  
+   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
+ 
    ReferenceProcessorStats stats;
    if (!rp->processing_is_mt()) {
      // Serial reference processing...
      stats = rp->process_discovered_references(&is_alive,
                                                &keep_alive,
                                                &drain_queue,
                                                NULL,
!                                               pt);
    } else {
      uint no_of_gc_workers = workers()->active_workers();
  
      // Parallel reference processing
      assert(no_of_gc_workers <= rp->max_num_q(),
*** 4332,4342 ****
      G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
      stats = rp->process_discovered_references(&is_alive,
                                                &keep_alive,
                                                &drain_queue,
                                                &par_task_executor,
!                                               _gc_timer_stw);
    }
  
    _gc_tracer_stw->report_gc_reference_stats(stats);
  
    // We have completed copying any necessary live referent objects.
--- 4340,4350 ----
      G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
      stats = rp->process_discovered_references(&is_alive,
                                                &keep_alive,
                                                &drain_queue,
                                                &par_task_executor,
!                                               pt);
    }
  
    _gc_tracer_stw->report_gc_reference_stats(stats);
  
    // We have completed copying any necessary live referent objects.
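Note: in the two hunks above, process_discovered_references() now receives the ReferenceProcessorPhaseTimes fetched from the policy's per-pause phase accounting in place of the raw _gc_timer_stw, on both the serial (NULL executor) and parallel (task-executor) branches. A standalone sketch of that dispatch, with hypothetical simplified types:

    #include <cstdio>

    struct PhaseTimes   {};  // stand-in for ReferenceProcessorPhaseTimes
    struct TaskExecutor {};  // stand-in for G1STWRefProcTaskExecutor

    struct RefProcessor {
      bool mt;
      bool processing_is_mt() const { return mt; }

      // A NULL executor selects the serial path; the phase-times accumulator
      // is threaded through either way so both paths record their timings.
      void process_discovered_references(TaskExecutor* executor, PhaseTimes* pt) {
        (void)pt;
        std::puts(executor == nullptr ? "serial reference processing"
                                      : "parallel reference processing");
      }
    };

    int main() {
      PhaseTimes pt;               // shared by both branches, as in the patch
      RefProcessor rp{true};
      TaskExecutor par_task_executor;
      rp.process_discovered_references(
          rp.processing_is_mt() ? &par_task_executor : nullptr, &pt);
      return 0;
    }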
*** 4351,4376 ****
    double ref_enq_start = os::elapsedTime();
  
    ReferenceProcessor* rp = _ref_processor_stw;
    assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
  
    // Now enqueue any remaining on the discovered lists on to
    // the pending list.
    if (!rp->processing_is_mt()) {
      // Serial reference processing...
!     rp->enqueue_discovered_references();
    } else {
      // Parallel reference enqueueing
  
      uint n_workers = workers()->active_workers();
  
      assert(n_workers <= rp->max_num_q(),
             "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
             n_workers,  rp->max_num_q());
  
      G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
!     rp->enqueue_discovered_references(&par_task_executor);
    }
  
    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "should have been disabled");
--- 4359,4386 ----
    double ref_enq_start = os::elapsedTime();
  
    ReferenceProcessor* rp = _ref_processor_stw;
    assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
  
+   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
+ 
    // Now enqueue any remaining on the discovered lists on to
    // the pending list.
    if (!rp->processing_is_mt()) {
      // Serial reference processing...
!     rp->enqueue_discovered_references(NULL, pt);
    } else {
      // Parallel reference enqueueing
  
      uint n_workers = workers()->active_workers();
  
      assert(n_workers <= rp->max_num_q(),
             "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
             n_workers,  rp->max_num_q());
  
      G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
!     rp->enqueue_discovered_references(&par_task_executor, pt);
    }
  
    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "should have been disabled");
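Note: the enqueue hunk above keeps the existing guard that the active worker count must not exceed rp->max_num_q(); parallel enqueueing assigns each worker its own discovered-reference queue, so more workers than queues would be a mismatch. A standalone sketch of that guard, with hypothetical simplified types:

    #include <cassert>
    #include <cstdio>

    struct RefProcessor {
      unsigned num_queues;
      unsigned max_num_q() const { return num_queues; }
    };

    int main() {
      RefProcessor rp{8};      // e.g. one queue per maximum GC thread
      unsigned n_workers = 6;  // active workers in this pause

      // Mirrors the assert in the hunk above: each worker claims one queue,
      // so the worker count must stay within the queue count.
      assert(n_workers <= rp.max_num_q());

      for (unsigned i = 0; i < n_workers; i++) {
        std::printf("worker %u enqueues discovered list %u\n", i, i);
      }
      return 0;
    }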