
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 8816 : [mq]: 8133470-fix-plab-inline
rev 8817 : imported patch 8073013-add-detailed-information-about-plab-memory-usage
rev 8818 : imported patch jon-review-statistics
rev 8819 : imported patch dlindholm-changes
rev 8821 : imported patch move-jfr-event-to-extra-cr
rev 8822 : imported patch 8133530-add-jfr-event-for-evacuation
rev 8823 : imported patch 8040162-avoid-reallocating-plab-allocators
rev 8824 : imported patch parallel-ref-proc-fixes
rev 8825 : imported patch mikael-suggestions-pss-alloc


4452       _closure->set_scanned_klass(klass);
4453 
4454       klass->oops_do(_closure);
4455 
4456       _closure->set_scanned_klass(NULL);
4457     }
4458     _count++;
4459   }
4460 };
4461 
4462 class G1ParTask : public AbstractGangTask {
4463 protected:
4464   G1CollectedHeap*       _g1h;
4465   G1ParScanThreadState** _pss;
4466   RefToScanQueueSet*     _queues;
4467   G1RootProcessor*       _root_processor;
4468   ParallelTaskTerminator _terminator;
4469   uint _n_workers;
4470 
4471 public:
4472   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadState** pss, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
4473     : AbstractGangTask("G1 collection"),
4474       _g1h(g1h),
4475       _pss(pss),
4476       _queues(task_queues),
4477       _root_processor(root_processor),
4478       _terminator(n_workers, _queues),
4479       _n_workers(n_workers)
4480   {}
4481 
4482   RefToScanQueueSet* queues() { return _queues; }
4483 
4484   RefToScanQueue *work_queue(int i) {
4485     return queues()->queue(i);
4486   }
4487 
4488   ParallelTaskTerminator* terminator() { return &_terminator; }
4489 
4490   // Helps out with CLD processing.
4491   //
4492   // During InitialMark we need to:
4493   // 1) Scavenge all CLDs for the young GC.
4494   // 2) Mark all objects directly reachable from strong CLDs.
4495   template <G1Mark do_mark_object>


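The first hunk shows only the tail of a klass-scanning closure: set_scanned_klass(klass) brackets klass->oops_do(_closure) so the copy helper knows which Klass to re-dirty if scanning leaves pointers into the young gen. A minimal sketch of the enclosing do_klass(), assuming a dirty-klass filter; the class and field names beyond the set_scanned_klass()/oops_do() pairing are reconstructions, not verbatim source:

    class KlassScanSketchClosure : public KlassClosure {
      G1ParCopyHelper* _closure;             // applied to each oop in the klass
      bool             _process_only_dirty;  // assumed filter flag
      int              _count;
     public:
      KlassScanSketchClosure(G1ParCopyHelper* closure, bool process_only_dirty)
        : _closure(closure), _process_only_dirty(process_only_dirty), _count(0) {}
      void do_klass(Klass* klass) {
        // When filtering, skip klasses with no recorded young-gen pointers.
        if (!_process_only_dirty || klass->has_modified_oops()) {
          klass->clear_modified_oops();   // about to rescan all of its oops
          _closure->set_scanned_klass(klass);
          klass->oops_do(_closure);
          _closure->set_scanned_klass(NULL);
        }
        _count++;
      }
    };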
4559 
4560       if (_g1h->collector_state()->during_initial_mark_pause()) {
4561         // We also need to mark copied objects.
4562         strong_root_cl = &scan_mark_root_cl;
4563         strong_cld_cl  = &scan_mark_cld_cl;
4564         if (ClassUnloadingWithConcurrentMark) {
4565           weak_root_cl = &scan_mark_weak_root_cl;
4566           weak_cld_cl  = &scan_mark_weak_cld_cl;
4567           trace_metadata = true;
4568         } else {
4569           weak_root_cl = &scan_mark_root_cl;
4570           weak_cld_cl  = &scan_mark_cld_cl;
4571         }
4572       } else {
4573         strong_root_cl = &scan_only_root_cl;
4574         weak_root_cl   = &scan_only_root_cl;
4575         strong_cld_cl  = &scan_only_cld_cl;
4576         weak_cld_cl    = &scan_only_cld_cl;
4577       }
4578 

4579       _root_processor->evacuate_roots(strong_root_cl,
4580                                       weak_root_cl,
4581                                       strong_cld_cl,
4582                                       weak_cld_cl,
4583                                       trace_metadata,
4584                                       worker_id);
4585 
4586       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
4587       double start_strong_roots_sec = os::elapsedTime();
4588       _root_processor->scan_remembered_sets(&push_heap_rs_cl,
4589                                             weak_root_cl,
4590                                             worker_id);
4591       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4592 
4593       double term_sec = 0.0;
4594       size_t evac_term_attempts = 0;
4595       {
4596         double start = os::elapsedTime();
4597         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4598         evac.do_void();
4599 
4600         evac_term_attempts = evac.term_attempts();
4601         term_sec = evac.term_time();
4602         double elapsed_sec = os::elapsedTime() - start;
4603         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4604         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4605         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4606       }
4607 
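The accounting at 4603-4605 splits each worker's evacuate-followers interval into copying and termination, so the two phase entries sum back to the measured elapsed time:

    ObjCopy[worker_id]     = elapsed_sec - term_sec
    Termination[worker_id] = term_sec   (evac_term_attempts recorded as work items)

For example (illustrative numbers), elapsed_sec = 0.050 and term_sec = 0.005 charge 45 ms to ObjCopy and 5 ms to Termination for that worker. ObjCopy uses add_time_secs rather than record_time_secs, presumably because a worker can accumulate copy time from more than one part of the pause.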


5179     G1ParScanThreadState* const pss = par_scan_state();
5180     pss->trim_queue();
5181   }
5182 };
5183 
5184 // Parallel Reference Processing closures
5185 
5186 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5187 // processing during G1 evacuation pauses.
5188 
5189 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5190 private:
5191   G1CollectedHeap*        _g1h;
5192   G1ParScanThreadState**  _pss;
5193   RefToScanQueueSet*      _queues;
5194   FlexibleWorkGang*       _workers;
5195   uint                    _active_workers;
5196 
5197 public:
5198   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5199                            G1ParScanThreadState** pss,
5200                            FlexibleWorkGang* workers,
5201                            RefToScanQueueSet *task_queues,
5202                            uint n_workers) :
5203     _g1h(g1h),
5204     _pss(pss),
5205     _queues(task_queues),
5206     _workers(workers),
5207     _active_workers(n_workers)
5208   {
5209     assert(n_workers > 0, "shouldn't call this otherwise");
5210   }
5211 
5212   // Executes the given task using the STW GC worker threads.
5213   virtual void execute(ProcessTask& task);
5214   virtual void execute(EnqueueTask& task);
5215 };
5216 
5217 // Gang task for possibly parallel reference processing
5218 
5219 class G1STWRefProcTaskProxy: public AbstractGangTask {
5220   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5221   ProcessTask&     _proc_task;
5222   G1CollectedHeap* _g1h;
5223   G1ParScanThreadState** _pss;
5224   RefToScanQueueSet* _task_queues;
5225   ParallelTaskTerminator* _terminator;
5226 
5227 public:
5228   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5229                         G1CollectedHeap* g1h,
5230                         G1ParScanThreadState** pss,
5231                         RefToScanQueueSet *task_queues,
5232                         ParallelTaskTerminator* terminator) :
5233     AbstractGangTask("Process reference objects in parallel"),
5234     _proc_task(proc_task),
5235     _g1h(g1h),
5236     _pss(pss),
5237     _task_queues(task_queues),
5238     _terminator(terminator)
5239   {}
5240 
5241   virtual void work(uint worker_id) {
5242     // The reference processing task executed by a single worker.
5243     ResourceMark rm;
5244     HandleMark   hm;
5245 
5246     G1STWIsAliveClosure is_alive(_g1h);
5247 
5248     G1ParScanThreadState*           pss = _pss[worker_id];
5249     pss->set_ref_processor(NULL);
5250 
5251     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
5252 
5253     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
5254 
5255     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5256 
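The bodies of the two execute() overloads declared at 5213-5214 fall in the elided 5257-5313 range. For the ProcessTask side, a sketch of the proxy dispatch they presumably perform; the terminator construction is an assumption, while the proxy constructor signature matches the hunk above:

    void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
      assert(_workers != NULL, "Need parallel worker threads.");
      // Size a terminator for the active workers and run the wrapped
      // ProcessTask on the STW work gang.
      ParallelTaskTerminator terminator(_active_workers, _queues);
      G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
      _workers->run_task(&proc_task_proxy);
    }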


5314   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5315 
5316   _workers->run_task(&enq_task_proxy);
5317 }
5318 
5319 // End of weak reference support closures
5320 
5321 // Abstract task used to preserve (i.e. copy) any referent objects
5322 // that are in the collection set and are pointed to by reference
5323 // objects discovered by the CM ref processor.
5324 
5325 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5326 protected:
5327   G1CollectedHeap*       _g1h;
5328   G1ParScanThreadState** _pss;
5329   RefToScanQueueSet*     _queues;
5330   ParallelTaskTerminator _terminator;
5331   uint _n_workers;
5332 
5333 public:
5334   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadState** pss, int workers, RefToScanQueueSet *task_queues) :
5335     AbstractGangTask("ParPreserveCMReferents"),
5336     _g1h(g1h),
5337     _pss(pss),
5338     _queues(task_queues),
5339     _terminator(workers, _queues),
5340     _n_workers(workers)
5341   { }
5342 
5343   void work(uint worker_id) {
5344     ResourceMark rm;
5345     HandleMark   hm;
5346 
5347     G1ParScanThreadState*          pss = _pss[worker_id];
5348     pss->set_ref_processor(NULL);
5349     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5350 
5351     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
5352 
5353     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
5354 
5355     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5356 
5357     if (_g1h->collector_state()->during_initial_mark_pause()) {


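Between lines 5357 and 5388 the webrev elides the loop head that feeds the iterator code below. A sketch of how the CM ref processor's discovered lists are presumably striped across the gang; the striding arithmetic is an assumption, the keep_alive/is_alive closures are assumed to be built in the elided part of work(), and only the DiscoveredListIterator calls mirror the next hunk:

    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    uint limit  = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
    uint stride = MIN2(MAX2(_n_workers, 1U), limit);
    // Worker i walks lists i, i+stride, i+2*stride, ...
    for (uint idx = worker_id; idx < limit; idx += stride) {
      DiscoveredList& ref_list = rp->discovered_refs()[idx];
      DiscoveredListIterator iter(ref_list, &keep_alive, &is_alive);
      while (iter.has_next()) {
        iter.load_ptrs(DEBUG_ONLY(true));
        if (iter.is_referent_alive()) {
          iter.make_referent_alive();   // copies the referent, as below
        }
        iter.move_to_next();
      }
    }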
5388         iter.load_ptrs(DEBUG_ONLY(true));
5389         oop ref = iter.obj();
5390 
5391         // This will filter nulls.
5392         if (iter.is_referent_alive()) {
5393           iter.make_referent_alive();
5394         }
5395         iter.move_to_next();
5396       }
5397     }
5398 
5399     // Drain the queue - which may cause stealing
5400     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
5401     drain_queue.do_void();
5402     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5403     assert(pss->queue_is_empty(), "should be");
5404   }
5405 };
5406 
5407 // Weak Reference processing during an evacuation pause (part 1).
5408 void G1CollectedHeap::process_discovered_references(G1ParScanThreadState** pss_) {
5409   double ref_proc_start = os::elapsedTime();
5410 
5411   ReferenceProcessor* rp = _ref_processor_stw;
5412   assert(rp->discovery_enabled(), "should have been enabled");
5413 
5414   // Any reference objects in the collection set that were 'discovered'
5415   // by the CM ref processor should have already been copied (either by
5416   // applying the external root copy closure to the discovered lists, or
5417   // by following an RSet entry).
5418   //
5419   // But some of the referents in the collection set that these
5420   // reference objects point to may not have been copied: the STW ref
5421   // processor would have seen that the reference object had already
5422   // been 'discovered' and would have skipped discovering the reference,
5423   // but would not have treated the reference object as a regular oop.
5424   // As a result the copy closure would not have been applied to the
5425   // referent object.
5426   //
5427   // We need to explicitly copy these referent objects - the references
5428   // will be processed at the end of remarking.
5429   //
5430   // We also need to do this copying before we process the reference
5431   // objects discovered by the STW ref processor in case one of these
5432   // referents points to another object which is also referenced by an
5433   // object discovered by the STW ref processor.
5434 
5435   uint no_of_gc_workers = workers()->active_workers();
5436 
5437   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5438                                                  pss_,
5439                                                  no_of_gc_workers,
5440                                                  _task_queues);
5441 
5442   workers()->run_task(&keep_cm_referents);
5443 
5444   // Closure to test whether a referent is alive.
5445   G1STWIsAliveClosure is_alive(this);
5446 
5447   // Even when parallel reference processing is enabled, the processing
5448   // of JNI refs is serial and performed by the current thread
5449   // rather than by a worker. The following PSS will be used for processing
5450   // JNI refs.
5451 
5452   // Use only a single queue for this PSS.
5453   G1ParScanThreadState*           pss = pss_[0];
5454   pss->set_ref_processor(NULL);
5455   assert(pss->queue_is_empty(), "pre-condition");
5456 
5457   // We do not embed a reference processor in the copying/scanning
5458   // closures while we're actually processing the discovered
5459   // reference objects.
5460 
5461   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss, NULL);
5462 
5463   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
5464 
5465   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5466 
5467   if (collector_state()->during_initial_mark_pause()) {
5468     // We also need to mark copied objects.
5469     copy_non_heap_cl = &copy_mark_non_heap_cl;
5470   }
5471 
5472   // Keep alive closure.
5473   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);


5475   // Serial Complete GC closure
5476   G1STWDrainQueueClosure drain_queue(this, pss);
5477 
5478   // Set up the soft refs policy...
5479   rp->setup_policy(false);
5480 
5481   ReferenceProcessorStats stats;
5482   if (!rp->processing_is_mt()) {
5483     // Serial reference processing...
5484     stats = rp->process_discovered_references(&is_alive,
5485                                               &keep_alive,
5486                                               &drain_queue,
5487                                               NULL,
5488                                               _gc_timer_stw,
5489                                               _gc_tracer_stw->gc_id());
5490   } else {
5491     // Parallel reference processing
5492     assert(rp->num_q() == no_of_gc_workers, "sanity");
5493     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5494 
5495     G1STWRefProcTaskExecutor par_task_executor(this, pss_, workers(), _task_queues, no_of_gc_workers);
5496     stats = rp->process_discovered_references(&is_alive,
5497                                               &keep_alive,
5498                                               &drain_queue,
5499                                               &par_task_executor,
5500                                               _gc_timer_stw,
5501                                               _gc_tracer_stw->gc_id());
5502   }
5503 
5504   _gc_tracer_stw->report_gc_reference_stats(stats);
5505 
5506   // We have completed copying any necessary live referent objects.
5507   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5508 
5509   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5510   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5511 }
5512 
5513 // Weak Reference processing during an evacuation pause (part 2).
5514 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadState** pss) {
5515   double ref_enq_start = os::elapsedTime();
5516 
5517   ReferenceProcessor* rp = _ref_processor_stw;
5518   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5519 
5520   // Now enqueue any references remaining on the discovered lists onto
5521   // the pending list.
5522   if (!rp->processing_is_mt()) {
5523     // Serial reference processing...
5524     rp->enqueue_discovered_references();
5525   } else {
5526     // Parallel reference enqueueing
5527 
5528     uint n_workers = workers()->active_workers();
5529 
5530     assert(rp->num_q() == n_workers, "sanity");
5531     assert(n_workers <= rp->max_num_q(), "sanity");
5532 
5533     G1STWRefProcTaskExecutor par_task_executor(this, pss, workers(), _task_queues, n_workers);
5534     rp->enqueue_discovered_references(&par_task_executor);
5535   }
5536 
5537   rp->verify_no_references_recorded();
5538   assert(!rp->discovery_enabled(), "should have been disabled");
5539 
5540   // FIXME
5541   // CM's reference processing also cleans up the string and symbol tables.
5542   // Should we do that here also? We could, but it is a serial operation
5543   // and could significantly increase the pause time.
5544 
5545   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5546   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5547 }
5548 
5549 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5550   _expand_heap_after_alloc_failure = true;
5551   _evacuation_failed = false;
5552 
5553   // Should G1EvacuationFailureALot be in effect for this GC?

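evacuate_collection_set() is truncated at 5553 in this webrev, but the two reference-processing phases above are presumably invoked from it in the order their comments prescribe. A hedged call-order sketch, with the surrounding setup elided:

    // Inside evacuate_collection_set(), after the G1ParTask workers finish:
    process_discovered_references(per_thread_states);  // part 1: copy referents, process refs
    // ... other post-evacuation work ...
    enqueue_discovered_references(per_thread_states);  // part 2: move refs onto the pending list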



4452       _closure->set_scanned_klass(klass);
4453 
4454       klass->oops_do(_closure);
4455 
4456       _closure->set_scanned_klass(NULL);
4457     }
4458     _count++;
4459   }
4460 };
4461 
4462 class G1ParTask : public AbstractGangTask {
4463 protected:
4464   G1CollectedHeap*       _g1h;
4465   G1ParScanThreadState** _pss;
4466   RefToScanQueueSet*     _queues;
4467   G1RootProcessor*       _root_processor;
4468   ParallelTaskTerminator _terminator;
4469   uint _n_workers;
4470 
4471 public:
4472   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadState** per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
4473     : AbstractGangTask("G1 collection"),
4474       _g1h(g1h),
4475       _pss(per_thread_states),
4476       _queues(task_queues),
4477       _root_processor(root_processor),
4478       _terminator(n_workers, _queues),
4479       _n_workers(n_workers)
4480   {}
4481 
4482   RefToScanQueueSet* queues() { return _queues; }
4483 
4484   RefToScanQueue *work_queue(int i) {
4485     return queues()->queue(i);
4486   }
4487 
4488   ParallelTaskTerminator* terminator() { return &_terminator; }
4489 
4490   // Helps out with CLD processing.
4491   //
4492   // During InitialMark we need to:
4493   // 1) Scavenge all CLDs for the young GC.
4494   // 2) Mark all objects directly reachable from strong CLDs.
4495   template <G1Mark do_mark_object>


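The new revision renames the constructor parameter to per_thread_states, in line with the patch-queue entries above (8040162-avoid-reallocating-plab-allocators, mikael-suggestions-pss-alloc): one G1ParScanThreadState per worker, created once per pause and shared by every task that needs it. A construction sketch under that assumption; the factory helper is hypothetical, and only the G1ParTask constructor signature is from the hunk:

    uint n_workers = workers()->active_workers();
    // One scan-thread state per worker for the whole pause, so PLAB
    // allocators are not recreated for each gang task.
    G1ParScanThreadState** per_thread_states =
        NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC);
    for (uint i = 0; i < n_workers; i++) {
      per_thread_states[i] = new_par_scan_state(i);  // hypothetical factory
    }
    G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
    workers()->run_task(&g1_par_task);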
4559 
4560       if (_g1h->collector_state()->during_initial_mark_pause()) {
4561         // We also need to mark copied objects.
4562         strong_root_cl = &scan_mark_root_cl;
4563         strong_cld_cl  = &scan_mark_cld_cl;
4564         if (ClassUnloadingWithConcurrentMark) {
4565           weak_root_cl = &scan_mark_weak_root_cl;
4566           weak_cld_cl  = &scan_mark_weak_cld_cl;
4567           trace_metadata = true;
4568         } else {
4569           weak_root_cl = &scan_mark_root_cl;
4570           weak_cld_cl  = &scan_mark_cld_cl;
4571         }
4572       } else {
4573         strong_root_cl = &scan_only_root_cl;
4574         weak_root_cl   = &scan_only_root_cl;
4575         strong_cld_cl  = &scan_only_cld_cl;
4576         weak_cld_cl    = &scan_only_cld_cl;
4577       }
4578 
4579       double start_strong_roots_sec = os::elapsedTime();
4580       _root_processor->evacuate_roots(strong_root_cl,
4581                                       weak_root_cl,
4582                                       strong_cld_cl,
4583                                       weak_cld_cl,
4584                                       trace_metadata,
4585                                       worker_id);
4586 
4587       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);

4588       _root_processor->scan_remembered_sets(&push_heap_rs_cl,
4589                                             weak_root_cl,
4590                                             worker_id);
4591       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4592 
4593       double term_sec = 0.0;
4594       size_t evac_term_attempts = 0;
4595       {
4596         double start = os::elapsedTime();
4597         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4598         evac.do_void();
4599 
4600         evac_term_attempts = evac.term_attempts();
4601         term_sec = evac.term_time();
4602         double elapsed_sec = os::elapsedTime() - start;
4603         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4604         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4605         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4606       }
4607 
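The functional change in this hunk is that start_strong_roots_sec now starts at 4579, before evacuate_roots(), instead of at old line 4587 between evacuate_roots() and scan_remembered_sets(). The recorded interval therefore widens from

    strong_roots_sec = t(scan_remembered_sets)

to

    strong_roots_sec = t(evacuate_roots) + t(scan_remembered_sets)

so root evacuation time is no longer missing from the strong-roots figure, while the ObjCopy/Termination accounting below is unchanged.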


5179     G1ParScanThreadState* const pss = par_scan_state();
5180     pss->trim_queue();
5181   }
5182 };
5183 
5184 // Parallel Reference Processing closures
5185 
5186 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5187 // processing during G1 evacuation pauses.
5188 
5189 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5190 private:
5191   G1CollectedHeap*        _g1h;
5192   G1ParScanThreadState**  _pss;
5193   RefToScanQueueSet*      _queues;
5194   FlexibleWorkGang*       _workers;
5195   uint                    _active_workers;
5196 
5197 public:
5198   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5199                            G1ParScanThreadState** per_thread_states,
5200                            FlexibleWorkGang* workers,
5201                            RefToScanQueueSet *task_queues,
5202                            uint n_workers) :
5203     _g1h(g1h),
5204     _pss(per_thread_states),
5205     _queues(task_queues),
5206     _workers(workers),
5207     _active_workers(n_workers)
5208   {
5209     assert(n_workers > 0, "shouldn't call this otherwise");
5210   }
5211 
5212   // Executes the given task using the STW GC worker threads.
5213   virtual void execute(ProcessTask& task);
5214   virtual void execute(EnqueueTask& task);
5215 };
5216 
5217 // Gang task for possibly parallel reference processing
5218 
5219 class G1STWRefProcTaskProxy: public AbstractGangTask {
5220   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5221   ProcessTask&     _proc_task;
5222   G1CollectedHeap* _g1h;
5223   G1ParScanThreadState** _pss;
5224   RefToScanQueueSet* _task_queues;
5225   ParallelTaskTerminator* _terminator;
5226 
5227 public:
5228   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5229                         G1CollectedHeap* g1h,
5230                         G1ParScanThreadState** per_thread_states,
5231                         RefToScanQueueSet *task_queues,
5232                         ParallelTaskTerminator* terminator) :
5233     AbstractGangTask("Process reference objects in parallel"),
5234     _proc_task(proc_task),
5235     _g1h(g1h),
5236     _pss(per_thread_states),
5237     _task_queues(task_queues),
5238     _terminator(terminator)
5239   {}
5240 
5241   virtual void work(uint worker_id) {
5242     // The reference processing task executed by a single worker.
5243     ResourceMark rm;
5244     HandleMark   hm;
5245 
5246     G1STWIsAliveClosure is_alive(_g1h);
5247 
5248     G1ParScanThreadState*           pss = _pss[worker_id];
5249     pss->set_ref_processor(NULL);
5250 
5251     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
5252 
5253     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
5254 
5255     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5256 


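G1STWRefEnqueueTaskProxy, used just below at 5314, is defined in an elided portion of the file. A minimal sketch of such a proxy; only the one-argument construction below is verbatim, the rest is a plausible reconstruction:

    class G1STWRefEnqueueTaskProxy : public AbstractGangTask {
      typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
      EnqueueTask& _enq_task;
     public:
      G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task)
        : AbstractGangTask("Enqueue reference objects in parallel"),
          _enq_task(enq_task) {}
      virtual void work(uint worker_id) {
        // Each worker enqueues its stripe of the discovered references.
        _enq_task.work(worker_id);
      }
    };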
5314   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5315 
5316   _workers->run_task(&enq_task_proxy);
5317 }
5318 
5319 // End of weak reference support closures
5320 
5321 // Abstract task used to preserve (i.e. copy) any referent objects
5322 // that are in the collection set and are pointed to by reference
5323 // objects discovered by the CM ref processor.
5324 
5325 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5326 protected:
5327   G1CollectedHeap*       _g1h;
5328   G1ParScanThreadState** _pss;
5329   RefToScanQueueSet*     _queues;
5330   ParallelTaskTerminator _terminator;
5331   uint _n_workers;
5332 
5333 public:
5334   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadState** per_thread_states, int workers, RefToScanQueueSet *task_queues) :
5335     AbstractGangTask("ParPreserveCMReferents"),
5336     _g1h(g1h),
5337     _pss(per_thread_states),
5338     _queues(task_queues),
5339     _terminator(workers, _queues),
5340     _n_workers(workers)
5341   { }
5342 
5343   void work(uint worker_id) {
5344     ResourceMark rm;
5345     HandleMark   hm;
5346 
5347     G1ParScanThreadState*          pss = _pss[worker_id];
5348     pss->set_ref_processor(NULL);
5349     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5350 
5351     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
5352 
5353     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
5354 
5355     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5356 
5357     if (_g1h->collector_state()->during_initial_mark_pause()) {


5388         iter.load_ptrs(DEBUG_ONLY(true));
5389         oop ref = iter.obj();
5390 
5391         // This will filter nulls.
5392         if (iter.is_referent_alive()) {
5393           iter.make_referent_alive();
5394         }
5395         iter.move_to_next();
5396       }
5397     }
5398 
5399     // Drain the queue - which may cause stealing
5400     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
5401     drain_queue.do_void();
5402     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5403     assert(pss->queue_is_empty(), "should be");
5404   }
5405 };
5406 
5407 // Weak Reference processing during an evacuation pause (part 1).
5408 void G1CollectedHeap::process_discovered_references(G1ParScanThreadState** per_thread_states) {
5409   double ref_proc_start = os::elapsedTime();
5410 
5411   ReferenceProcessor* rp = _ref_processor_stw;
5412   assert(rp->discovery_enabled(), "should have been enabled");
5413 
5414   // Any reference objects in the collection set that were 'discovered'
5415   // by the CM ref processor should have already been copied (either by
5416   // applying the external root copy closure to the discovered lists, or
5417   // by following an RSet entry).
5418   //
5419   // But some of the referents in the collection set that these
5420   // reference objects point to may not have been copied: the STW ref
5421   // processor would have seen that the reference object had already
5422   // been 'discovered' and would have skipped discovering the reference,
5423   // but would not have treated the reference object as a regular oop.
5424   // As a result the copy closure would not have been applied to the
5425   // referent object.
5426   //
5427   // We need to explicitly copy these referent objects - the references
5428   // will be processed at the end of remarking.
5429   //
5430   // We also need to do this copying before we process the reference
5431   // objects discovered by the STW ref processor in case one of these
5432   // referents points to another object which is also referenced by an
5433   // object discovered by the STW ref processor.
5434 
5435   uint no_of_gc_workers = workers()->active_workers();
5436 
5437   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5438                                                  per_thread_states,
5439                                                  no_of_gc_workers,
5440                                                  _task_queues);
5441 
5442   workers()->run_task(&keep_cm_referents);
5443 
5444   // Closure to test whether a referent is alive.
5445   G1STWIsAliveClosure is_alive(this);
5446 
5447   // Even when parallel reference processing is enabled, the processing
5448   // of JNI refs is serial and performed by the current thread
5449   // rather than by a worker. The following PSS will be used for processing
5450   // JNI refs.
5451 
5452   // Use only a single queue for this PSS.
5453   G1ParScanThreadState*           pss = per_thread_states[0];
5454   pss->set_ref_processor(NULL);
5455   assert(pss->queue_is_empty(), "pre-condition");
5456 
5457   // We do not embed a reference processor in the copying/scanning
5458   // closures while we're actually processing the discovered
5459   // reference objects.
5460 
5461   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss, NULL);
5462 
5463   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
5464 
5465   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5466 
5467   if (collector_state()->during_initial_mark_pause()) {
5468     // We also need to mark copied objects.
5469     copy_non_heap_cl = &copy_mark_non_heap_cl;
5470   }
5471 
5472   // Keep alive closure.
5473   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);


5475   // Serial Complete GC closure
5476   G1STWDrainQueueClosure drain_queue(this, pss);
5477 
5478   // Set up the soft refs policy...
5479   rp->setup_policy(false);
5480 
5481   ReferenceProcessorStats stats;
5482   if (!rp->processing_is_mt()) {
5483     // Serial reference processing...
5484     stats = rp->process_discovered_references(&is_alive,
5485                                               &keep_alive,
5486                                               &drain_queue,
5487                                               NULL,
5488                                               _gc_timer_stw,
5489                                               _gc_tracer_stw->gc_id());
5490   } else {
5491     // Parallel reference processing
5492     assert(rp->num_q() == no_of_gc_workers, "sanity");
5493     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5494 
5495     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
5496     stats = rp->process_discovered_references(&is_alive,
5497                                               &keep_alive,
5498                                               &drain_queue,
5499                                               &par_task_executor,
5500                                               _gc_timer_stw,
5501                                               _gc_tracer_stw->gc_id());
5502   }
5503 
5504   _gc_tracer_stw->report_gc_reference_stats(stats);
5505 
5506   // We have completed copying any necessary live referent objects.
5507   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5508 
5509   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5510   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5511 }
5512 
5513 // Weak Reference processing during an evacuation pause (part 2).
5514 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadState** per_thread_states) {
5515   double ref_enq_start = os::elapsedTime();
5516 
5517   ReferenceProcessor* rp = _ref_processor_stw;
5518   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5519 
5520   // Now enqueue any references remaining on the discovered lists onto
5521   // the pending list.
5522   if (!rp->processing_is_mt()) {
5523     // Serial reference processing...
5524     rp->enqueue_discovered_references();
5525   } else {
5526     // Parallel reference enqueueing
5527 
5528     uint n_workers = workers()->active_workers();
5529 
5530     assert(rp->num_q() == n_workers, "sanity");
5531     assert(n_workers <= rp->max_num_q(), "sanity");
5532 
5533     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
5534     rp->enqueue_discovered_references(&par_task_executor);
5535   }
5536 
5537   rp->verify_no_references_recorded();
5538   assert(!rp->discovery_enabled(), "should have been disabled");
5539 
5540   // FIXME
5541   // CM's reference processing also cleans up the string and symbol tables.
5542   // Should we do that here also? We could, but it is a serial operation
5543   // and could significantly increase the pause time.
5544 
5545   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5546   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5547 }
5548 
5549 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5550   _expand_heap_after_alloc_failure = true;
5551   _evacuation_failed = false;
5552 
5553   // Should G1EvacuationFailureALot be in effect for this GC?

