
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 13328 : [mq]: webrev.0b
rev 13329 : [mq]: webrev.1
rev 13330 : imported patch webrev.2
rev 13331 : imported patch webrev.3b
rev 13332 : [mq]: webrev.4


1243       // how reference processing currently works in G1.
1244 
1245       // Temporarily make discovery by the STW ref processor single-threaded (non-MT).
1246       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1247 
1248       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1249       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1250 
1251       ref_processor_stw()->enable_discovery();
1252       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1253 
1254       // Do collection work
1255       {
1256         HandleMark hm;  // Discard invalid handles created during gc
1257         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1258       }
1259 
1260       assert(num_free_regions() == 0, "we should not have added any free regions");
1261       rebuild_region_sets(false /* free_list_only */);
1262 
1263       ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q(), ref_processor_stw()->processing_is_mt());
1264 
1265       // Enqueue any discovered reference objects that have
1266       // not been removed from the discovered lists.
1267       ref_processor_stw()->enqueue_discovered_references(NULL, &pt);
1268 
1269       pt.print_enqueue_phase();
1270 
1271 #if defined(COMPILER2) || INCLUDE_JVMCI
1272       DerivedPointerTable::update_pointers();
1273 #endif
1274 
1275       MemoryService::track_memory_usage();
1276 
1277       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1278       ref_processor_stw()->verify_no_references_recorded();
1279 
1280       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1281       ClassLoaderDataGraph::purge();
1282       MetaspaceAux::verify_metrics();
1283 


1654   // should not be holding on to any GC alloc regions. The method
1655   // below will make sure of that and do any remaining cleanup.
1656   _allocator->abandon_gc_alloc_regions();
1657 
1658   // Instead of tearing down / rebuilding the free lists here, we
1659   // could use the remove_all_pending() method on free_list to
1660   // remove only the regions we need to remove.
1661   tear_down_region_sets(true /* free_list_only */);
1662   shrink_helper(shrink_bytes);
1663   rebuild_region_sets(true /* free_list_only */);
1664 
1665   _hrm.verify_optional();
1666   _verifier->verify_region_sets_optional();
1667 }
1668 
1669 // Public methods.
1670 
1671 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1672   CollectedHeap(),
1673   _collector_policy(collector_policy),
1674   _g1_policy(create_g1_policy()),


1675   _collection_set(this, _g1_policy),
1676   _dirty_card_queue_set(false),
1677   _is_alive_closure_cm(this),
1678   _is_alive_closure_stw(this),
1679   _ref_processor_cm(NULL),
1680   _ref_processor_stw(NULL),
1681   _bot(NULL),
1682   _hot_card_cache(NULL),
1683   _g1_rem_set(NULL),
1684   _cg1r(NULL),
1685   _g1mm(NULL),
1686   _preserved_marks_set(true /* in_c_heap */),
1687   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1688   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1689   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1690   _humongous_reclaim_candidates(),
1691   _has_humongous_reclaim_candidates(false),
1692   _archive_allocator(NULL),
1693   _free_regions_coming(false),
1694   _gc_time_stamp(0),
1695   _summary_bytes_used(0),
1696   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1697   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1698   _expand_heap_after_alloc_failure(true),
1699   _old_marking_cycles_started(0),
1700   _old_marking_cycles_completed(0),
1701   _in_cset_fast_test(),
1702   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1703   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1704 
1705   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1706                           /* are_GC_task_threads */true,
1707                           /* are_ConcurrentGC_threads */false);
1708   _workers->initialize_workers();
1709   _verifier = new G1HeapVerifier(this);
1710 
1711   _allocator = G1Allocator::create_allocator(this);
1712 
1713   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1714 
1715   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1716 
1717   // Override the default _filler_array_max_size so that no humongous filler
1718   // objects are created.
1719   _filler_array_max_size = _humongous_object_threshold_in_words;
1720 
1721   uint n_queues = ParallelGCThreads;
1722   _task_queues = new RefToScanQueueSet(n_queues);
1723 


2037                            &_is_alive_closure_cm);
2038                                 // is alive closure
2039                                 // (for efficiency/performance)
2040 
2041   // STW ref processor
2042   _ref_processor_stw =
2043     new ReferenceProcessor(mr,    // span
2044                            mt_processing,
2045                                 // mt processing
2046                            ParallelGCThreads,
2047                                 // degree of mt processing
2048                            (ParallelGCThreads > 1),
2049                                 // mt discovery
2050                            ParallelGCThreads,
2051                                 // degree of mt discovery
2052                            true,
2053                                 // Reference discovery is atomic
2054                            &_is_alive_closure_stw);
2055                                 // is alive closure
2056                                 // (for efficiency/performance)
2057     _ref_phase_times = new ReferenceProcessorPhaseTimes(_gc_timer_stw,
2058                                                         ParallelGCThreads,
2059                                                         mt_processing);
2060 }
2061 
2062 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2063   return _collector_policy;
2064 }
2065 
2066 size_t G1CollectedHeap::capacity() const {
2067   return _hrm.length() * HeapRegion::GrainBytes;
2068 }
2069 
2070 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2071   hr->reset_gc_time_stamp();
2072 }
2073 
2074 #ifndef PRODUCT
2075 
2076 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2077 private:
2078   unsigned _gc_time_stamp;
2079   bool _failures;


4305 
4306   // Even when parallel reference processing is enabled, the processing
4307   // of JNI refs is serial and is performed by the current thread
4308   // rather than by a worker. The following PSS will be used for
4309   // processing JNI refs.
4310 
4311   // Use only a single queue for this PSS.
4312   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4313   pss->set_ref_processor(NULL);
4314   assert(pss->queue_is_empty(), "pre-condition");
4315 
4316   // Keep alive closure.
4317   G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
4318 
4319   // Serial Complete GC closure
4320   G1STWDrainQueueClosure drain_queue(this, pss);
4321 
4322   // Set up the soft refs policy...
4323   rp->setup_policy(false);
4324 
4325   ref_phase_times()->reset();
4326 
4327   ReferenceProcessorStats stats;
4328   if (!rp->processing_is_mt()) {
4329     // Serial reference processing...
4330     stats = rp->process_discovered_references(&is_alive,
4331                                               &keep_alive,
4332                                               &drain_queue,
4333                                               NULL,
4334                                               ref_phase_times());
4335   } else {
4336     uint no_of_gc_workers = workers()->active_workers();
4337 
4338     // Parallel reference processing
4339     assert(no_of_gc_workers <= rp->max_num_q(),
4340            "Mismatch between the number of GC workers %u and the maximum number of reference processing queues %u",
4341            no_of_gc_workers, rp->max_num_q());
4342 
4343     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
4344     stats = rp->process_discovered_references(&is_alive,
4345                                               &keep_alive,
4346                                               &drain_queue,
4347                                               &par_task_executor,
4348                                               ref_phase_times());
4349   }
4350 
4351   _gc_tracer_stw->report_gc_reference_stats(stats);
4352 
4353   // We have completed copying any necessary live referent objects.
4354   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4355 
4356   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4357   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4358 }
4359 
4360 // Weak Reference processing during an evacuation pause (part 2).
4361 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4362   double ref_enq_start = os::elapsedTime();
4363 
4364   ReferenceProcessor* rp = _ref_processor_stw;
4365   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
4366 


4367   // Now enqueue any references remaining on the discovered
4368   // lists onto the pending list.
4369   if (!rp->processing_is_mt()) {
4370     // Serial reference processing...
4371     rp->enqueue_discovered_references(NULL, ref_phase_times());
4372   } else {
4373     // Parallel reference enqueueing
4374 
4375     uint n_workers = workers()->active_workers();
4376 
4377     assert(n_workers <= rp->max_num_q(),
4378            "Mismatch between the number of GC workers %u and the maximum number of reference processing queues %u",
4379            n_workers, rp->max_num_q());
4380 
4381     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4382     rp->enqueue_discovered_references(&par_task_executor, ref_phase_times());
4383   }
4384 
4385   rp->verify_no_references_recorded();
4386   assert(!rp->discovery_enabled(), "should have been disabled");
4387 
4388   // FIXME
4389   // CM's reference processing also cleans up the string and symbol tables.
4390   // Should we do that here also? We could, but it is a serial operation
4391   // and could significantly increase the pause time.
4392 
4393   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4394   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4395 }
4396 
4397 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4398   double merge_pss_time_start = os::elapsedTime();
4399   per_thread_states->flush();
4400   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4401 }
4402 




1243       // how reference processing currently works in G1.
1244 
1245       // Temporarily make discovery by the STW ref processor single-threaded (non-MT).
1246       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1247 
1248       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1249       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1250 
1251       ref_processor_stw()->enable_discovery();
1252       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1253 
1254       // Do collection work
1255       {
1256         HandleMark hm;  // Discard invalid handles created during gc
1257         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1258       }
1259 
1260       assert(num_free_regions() == 0, "we should not have added any free regions");
1261       rebuild_region_sets(false /* free_list_only */);
1262 
1263       ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q());
1264 
1265       // Enqueue any discovered reference objects that have
1266       // not been removed from the discovered lists.
1267       ref_processor_stw()->enqueue_discovered_references(NULL, &pt);
1268 
1269       pt.print_enqueue_phase();
1270 
1271 #if defined(COMPILER2) || INCLUDE_JVMCI
1272       DerivedPointerTable::update_pointers();
1273 #endif
1274 
1275       MemoryService::track_memory_usage();
1276 
1277       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1278       ref_processor_stw()->verify_no_references_recorded();
1279 
1280       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1281       ClassLoaderDataGraph::purge();
1282       MetaspaceAux::verify_metrics();
1283 
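The two mutator objects above (ReferenceProcessorMTDiscoveryMutator and
ReferenceProcessorIsAliveMutator) are stack-allocated save/restore helpers:
the constructor overrides a setting on the reference processor and the
destructor restores the saved value when the scope exits, which is what makes
the overrides "temporary". A minimal sketch of the idiom, with a hypothetical
ScopedFlagMutator standing in for the real classes:

    // Hypothetical stand-in for the save/restore mutators used above.
    class ScopedFlagMutator {
      bool* _flag;   // the setting being temporarily overridden
      bool  _saved;  // the original value, restored on scope exit
    public:
      ScopedFlagMutator(bool* flag, bool new_value)
        : _flag(flag), _saved(*flag) {
        *_flag = new_value;
      }
      ~ScopedFlagMutator() {
        *_flag = _saved;  // runs on any exit from the scope
      }
    };

This guarantees the STW ref processor's settings are restored however the
full-collection scope is left.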


1654   // should not be holding on to any GC alloc regions. The method
1655   // below will make sure of that and do any remaining cleanup.
1656   _allocator->abandon_gc_alloc_regions();
1657 
1658   // Instead of tearing down / rebuilding the free lists here, we
1659   // could use the remove_all_pending() method on free_list to
1660   // remove only the regions we need to remove.
1661   tear_down_region_sets(true /* free_list_only */);
1662   shrink_helper(shrink_bytes);
1663   rebuild_region_sets(true /* free_list_only */);
1664 
1665   _hrm.verify_optional();
1666   _verifier->verify_region_sets_optional();
1667 }
1668 
1669 // Public methods.
1670 
1671 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1672   CollectedHeap(),
1673   _collector_policy(collector_policy),
1674   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1675   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1676   _g1_policy(create_g1_policy(_gc_timer_stw)),
1677   _collection_set(this, _g1_policy),
1678   _dirty_card_queue_set(false),
1679   _is_alive_closure_cm(this),
1680   _is_alive_closure_stw(this),
1681   _ref_processor_cm(NULL),
1682   _ref_processor_stw(NULL),
1683   _bot(NULL),
1684   _hot_card_cache(NULL),
1685   _g1_rem_set(NULL),
1686   _cg1r(NULL),
1687   _g1mm(NULL),
1688   _preserved_marks_set(true /* in_c_heap */),
1689   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1690   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1691   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1692   _humongous_reclaim_candidates(),
1693   _has_humongous_reclaim_candidates(false),
1694   _archive_allocator(NULL),
1695   _free_regions_coming(false),
1696   _gc_time_stamp(0),
1697   _summary_bytes_used(0),
1698   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1699   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1700   _expand_heap_after_alloc_failure(true),
1701   _old_marking_cycles_started(0),
1702   _old_marking_cycles_completed(0),
1703   _in_cset_fast_test() {


1704 
1705   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1706                           /* are_GC_task_threads */true,
1707                           /* are_ConcurrentGC_threads */false);
1708   _workers->initialize_workers();
1709   _verifier = new G1HeapVerifier(this);
1710 
1711   _allocator = G1Allocator::create_allocator(this);
1712 
1713   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1714 
1715   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1716 
1717   // Override the default _filler_array_max_size so that no humongous filler
1718   // objects are created.
1719   _filler_array_max_size = _humongous_object_threshold_in_words;
1720 
1721   uint n_queues = ParallelGCThreads;
1722   _task_queues = new RefToScanQueueSet(n_queues);
1723 
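The initializer-list change above moves _gc_timer_stw and _gc_tracer_stw ahead
of _g1_policy so that the timer already exists when
create_g1_policy(_gc_timer_stw) runs; this presumably mirrors a matching
reordering of the member declarations in the header (not shown), since C++
initializes members in declaration order, not in initializer-list order. A
minimal sketch of the pitfall, with hypothetical Timer/Policy/Heap types:

    #include <cstdio>

    struct Timer { };

    struct Policy {
      explicit Policy(Timer* t) : _timer(t) { }
      Timer* _timer;
    };

    struct Heap {
      Timer*  _gc_timer;  // declared first, so it is initialized first
      Policy* _policy;    // its initializer may then safely read _gc_timer
      Heap()
        : _gc_timer(new Timer()),
          _policy(new Policy(_gc_timer)) { }
    };

    int main() {
      Heap h;
      std::printf("policy sees timer %p\n", (void*)h._policy->_timer);
      return 0;
    }

Had _gc_timer been declared after _policy, the Policy constructor would have
read an uninitialized pointer no matter how the initializer list was ordered.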


2037                            &_is_alive_closure_cm);
2038                                 // is alive closure
2039                                 // (for efficiency/performance)
2040 
2041   // STW ref processor
2042   _ref_processor_stw =
2043     new ReferenceProcessor(mr,    // span
2044                            mt_processing,
2045                                 // mt processing
2046                            ParallelGCThreads,
2047                                 // degree of mt processing
2048                            (ParallelGCThreads > 1),
2049                                 // mt discovery
2050                            ParallelGCThreads,
2051                                 // degree of mt discovery
2052                            true,
2053                                 // Reference discovery is atomic
2054                            &_is_alive_closure_stw);
2055                                 // is alive closure
2056                                 // (for efficiency/performance)



2057 }
2058 
2059 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2060   return _collector_policy;
2061 }
2062 
2063 size_t G1CollectedHeap::capacity() const {
2064   return _hrm.length() * HeapRegion::GrainBytes;
2065 }
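capacity() is just the number of committed regions times the fixed region
size. A quick worked example, assuming a hypothetical 2 MB region size (G1
picks a power-of-two region size between 1 MB and 32 MB at startup):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t grain_bytes = 2u * 1024 * 1024;  // bytes per region (assumed)
      const size_t num_regions = 1024;              // committed regions (assumed)
      // Same formula as G1CollectedHeap::capacity() above.
      std::printf("capacity = %zu bytes\n", num_regions * grain_bytes);
      // Prints: capacity = 2147483648 bytes, i.e. 2 GB.
      return 0;
    }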
2066 
2067 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2068   hr->reset_gc_time_stamp();
2069 }
2070 
2071 #ifndef PRODUCT
2072 
2073 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2074 private:
2075   unsigned _gc_time_stamp;
2076   bool _failures;


4302 
4303   // Even when parallel reference processing is enabled, the processing
4304   // of JNI refs is serial and is performed by the current thread
4305   // rather than by a worker. The following PSS will be used for
4306   // processing JNI refs.
4307 
4308   // Use only a single queue for this PSS.
4309   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4310   pss->set_ref_processor(NULL);
4311   assert(pss->queue_is_empty(), "pre-condition");
4312 
4313   // Keep alive closure.
4314   G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
4315 
4316   // Serial Complete GC closure
4317   G1STWDrainQueueClosure drain_queue(this, pss);
4318 
4319   // Set up the soft refs policy...
4320   rp->setup_policy(false);
4321 
4322   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
4323 
4324   ReferenceProcessorStats stats;
4325   if (!rp->processing_is_mt()) {
4326     // Serial reference processing...
4327     stats = rp->process_discovered_references(&is_alive,
4328                                               &keep_alive,
4329                                               &drain_queue,
4330                                               NULL,
4331                                               pt);
4332   } else {
4333     uint no_of_gc_workers = workers()->active_workers();
4334 
4335     // Parallel reference processing
4336     assert(no_of_gc_workers <= rp->max_num_q(),
4337            "Mismatch between the number of GC workers %u and the maximum number of reference processing queues %u",
4338            no_of_gc_workers, rp->max_num_q());
4339 
4340     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
4341     stats = rp->process_discovered_references(&is_alive,
4342                                               &keep_alive,
4343                                               &drain_queue,
4344                                               &par_task_executor,
4345                                               pt);
4346   }
4347 
4348   _gc_tracer_stw->report_gc_reference_stats(stats);
4349 
4350   // We have completed copying any necessary live referent objects.
4351   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4352 
4353   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4354   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4355 }
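Note that both branches above call the same process_discovered_references()
entry point; the executor argument selects the mode, with NULL meaning the
calling VM thread does all the work itself. A hedged sketch of that dispatch
shape, using hypothetical stand-in types rather than the real
ReferenceProcessor interfaces:

    #include <cstddef>

    // Hypothetical executor interface; the real one fans tasks out to the
    // GC worker gang.
    struct TaskExecutor {
      virtual void execute() = 0;
      virtual ~TaskExecutor() { }
    };

    struct Processor {
      void process_discovered(TaskExecutor* executor) {
        if (executor == NULL) {
          drain_serially();     // serial path: current thread only
        } else {
          executor->execute();  // parallel path: worker gang
        }
      }
      void drain_serially() { /* walk each discovered list in turn */ }
    };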
4356 
4357 // Weak Reference processing during an evacuation pause (part 2).
4358 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4359   double ref_enq_start = os::elapsedTime();
4360 
4361   ReferenceProcessor* rp = _ref_processor_stw;
4362   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
4363 
4364   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
4365 
4366   // Now enqueue any references remaining on the discovered
4367   // lists onto the pending list.
4368   if (!rp->processing_is_mt()) {
4369     // Serial reference processing...
4370     rp->enqueue_discovered_references(NULL, pt);
4371   } else {
4372     // Parallel reference enqueueing
4373 
4374     uint n_workers = workers()->active_workers();
4375 
4376     assert(n_workers <= rp->max_num_q(),
4377            "Mismatch between the number of GC workers %u and the maximum number of reference processing queues %u",
4378            n_workers, rp->max_num_q());
4379 
4380     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4381     rp->enqueue_discovered_references(&par_task_executor, pt);
4382   }
4383 
4384   rp->verify_no_references_recorded();
4385   assert(!rp->discovery_enabled(), "should have been disabled");
4386 
4387   // FIXME
4388   // CM's reference processing also cleans up the string and symbol tables.
4389   // Should we do that here also? We could, but it is a serial operation
4390   // and could significantly increase the pause time.
4391 
4392   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4393   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4394 }
4395 
4396 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4397   double merge_pss_time_start = os::elapsedTime();
4398   per_thread_states->flush();
4399   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4400 }
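merge_per_thread_state_info() and the reference phases above all use the same
manual timing idiom: sample os::elapsedTime() before and after the phase, then
record the difference scaled to milliseconds. A hedged sketch of the same
idiom as an RAII scope timer; std::chrono stands in for os::elapsedTime() and
the recorder callback is hypothetical:

    #include <chrono>
    #include <functional>

    class ScopedPhaseTimer {
      std::chrono::steady_clock::time_point _start;
      std::function<void(double)> _record_ms;  // hypothetical sink, takes ms
    public:
      explicit ScopedPhaseTimer(std::function<void(double)> record_ms)
        : _start(std::chrono::steady_clock::now()), _record_ms(record_ms) { }
      ~ScopedPhaseTimer() {
        std::chrono::duration<double, std::milli> elapsed =
            std::chrono::steady_clock::now() - _start;
        _record_ms(elapsed.count());  // record on scope exit
      }
    };

With such a helper, the flush() call could be timed by a single stack object
instead of paired elapsedTime() samples.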
4401 

