src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6323 : 8027553: Change the in_cset_fast_test functionality to use the G1BiasedArray abstraction
Summary: Instead of using a manually managed array for the in_cset_fast_test array, use a G1BiasedArray instance.
Reviewed-by: brutisso, mgerdin
rev 6326 : 8028710: G1 does not retire allocation buffers after reference processing work
Summary: G1 does not retire allocation buffers after reference processing work when -XX:+ParallelRefProcEnabled is enabled. This causes wrong calculation of PLAB sizes, as the amount of space wasted is not updated correctly.
Reviewed-by: brutisso
rev 6327 : 8019342: G1: High "Other" time most likely due to card redirtying
Summary: Parallelize card redirtying to decrease the time it takes.
Reviewed-by: brutisso
rev 6328 : 8040002: Clean up code and code duplication in re-dirtying cards for verification
Summary: Card re-dirtying code for verification and actual redirtying uses two different, almost completely identical card closures. Also the verification code still assumes a perm gen.
Reviewed-by: brutisso, jmasa
rev 6334 : 8035400: Move G1ParScanThreadState into its own files
Summary: Extract the G1ParScanThreadState class from G1CollectedHeap.?pp into its own files.
Reviewed-by: brutisso, mgerdin
rev 6335 : 8035401: Fix visibility of G1ParScanThreadState members
Summary: After JDK-8035400 there were several opportunities to fix the visibility of several members of the G1ParScanThreadState class.
Reviewed-by: brutisso, mgerdin
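
Rev 6323 above replaces the manually managed in_cset_fast_test array with a G1BiasedArray instance. As a rough illustration of the biased-array idea only (a minimal, hypothetical sketch; the names below are not the real G1BiasedArray API), entries are indexed directly by heap address by subtracting a precomputed base address and shifting by the log of the region size:

#include <cassert>
#include <cstddef>

// Hypothetical sketch: one entry per heap region, addressed directly by
// heap address. Illustrative only; not the actual G1BiasedArray interface.
template <class T>
class BiasedArraySketch {
  T*          _entries;  // one entry per covered region
  size_t      _length;   // number of regions covered
  const char* _low;      // lowest covered heap address (the "bias" base)
  int         _shift;    // log2 of the region size in bytes
 public:
  BiasedArraySketch(const char* heap_bottom, size_t num_regions, int log_region_size)
    : _entries(new T[num_regions]()), _length(num_regions),
      _low(heap_bottom), _shift(log_region_size) {}
  ~BiasedArraySketch() { delete[] _entries; }

  size_t index_for(const void* addr) const {
    // Map an address inside the heap to its region's entry.
    size_t idx = (size_t)((const char*)addr - _low) >> _shift;
    assert(idx < _length && "address outside covered heap");
    return idx;
  }
  T    get_by_address(const void* addr) const    { return _entries[index_for(addr)]; }
  void set_by_address(const void* addr, T value) { _entries[index_for(addr)] = value; }
};

With such a structure, an in-collection-set query becomes a single get_by_address(obj) lookup instead of manual base/offset bookkeeping around a raw array.
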


4672                                 RefToScanQueueSet* queues,
4673                                 ParallelTaskTerminator* terminator)
4674     : _g1h(g1h), _par_scan_state(par_scan_state),
4675       _queues(queues), _terminator(terminator) {}
4676 
4677   void do_void();
4678 
4679 private:
4680   inline bool offer_termination();
4681 };
4682 
4683 bool G1ParEvacuateFollowersClosure::offer_termination() {
4684   G1ParScanThreadState* const pss = par_scan_state();
4685   pss->start_term_time();
4686   const bool res = terminator()->offer_termination();
4687   pss->end_term_time();
4688   return res;
4689 }
4690 
4691 void G1ParEvacuateFollowersClosure::do_void() {
4692   StarTask stolen_task;
4693   G1ParScanThreadState* const pss = par_scan_state();
4694   pss->trim_queue();
4695 
4696   do {
4697     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
4698       assert(pss->verify_task(stolen_task), "sanity");
4699       if (stolen_task.is_narrow()) {
4700         pss->deal_with_reference((narrowOop*) stolen_task);
4701       } else {
4702         pss->deal_with_reference((oop*) stolen_task);
4703       }
4704 
4705       // We've just processed a reference and we might have made
4706       // available new entries on the queues. So we have to make sure
4707       // we drain the queues as necessary.
4708       pss->trim_queue();
4709     }
4710   } while (!offer_termination());
4711 }
4712 
4713 class G1KlassScanClosure : public KlassClosure {
4714  G1ParCopyHelper* _closure;
4715  bool             _process_only_dirty;
4716  int              _count;
4717  public:
4718   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4719       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4720   void do_klass(Klass* klass) {
4721     // If the klass has not been dirtied we know that there are
4722     // no references into the young gen and we can skip it.
4723     if (!_process_only_dirty || klass->has_modified_oops()) {
4724       // Clean the klass since we're going to scavenge all the metadata.
4725       klass->clear_modified_oops();
4726 
4727       // Tell the closure that this klass is the Klass to scavenge
4728       // and is the one to dirty if oops are left pointing into the young gen.
4729       _closure->set_scanned_klass(klass);


4735     _count++;
4736   }
4737 };
4738 
4739 class G1ParTask : public AbstractGangTask {
4740 protected:
4741   G1CollectedHeap*       _g1h;
4742   RefToScanQueueSet      *_queues;
4743   ParallelTaskTerminator _terminator;
4744   uint _n_workers;
4745 
4746   Mutex _stats_lock;
4747   Mutex* stats_lock() { return &_stats_lock; }
4748 
4749   size_t getNCards() {
4750     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4751       / G1BlockOffsetSharedArray::N_bytes;
4752   }
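
As a quick worked example of getNCards(): it rounds the heap capacity up to whole block-offset-table blocks. Assuming the usual G1BlockOffsetSharedArray::N_bytes of 512, a 1 GiB capacity gives (1073741824 + 511) / 512 = 2097152 cards.
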
4753 
4754 public:
4755   G1ParTask(G1CollectedHeap* g1h,
4756             RefToScanQueueSet *task_queues)
4757     : AbstractGangTask("G1 collection"),
4758       _g1h(g1h),
4759       _queues(task_queues),
4760       _terminator(0, _queues),
4761       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4762   {}
4763 
4764   RefToScanQueueSet* queues() { return _queues; }
4765 
4766   RefToScanQueue *work_queue(int i) {
4767     return queues()->queue(i);
4768   }
4769 
4770   ParallelTaskTerminator* terminator() { return &_terminator; }
4771 
4772   virtual void set_for_termination(int active_workers) {
4773     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4774     // in the young space (_par_seq_tasks) in the G1 heap
4775     // for SequentialSubTasksDone.
4776     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap


4834                                     worker_id);
4835       pss.end_strong_roots();
4836 
4837       {
4838         double start = os::elapsedTime();
4839         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4840         evac.do_void();
4841         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4842         double term_ms = pss.term_time()*1000.0;
4843         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4844         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4845       }
4846       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4847       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4848 
4849       if (ParallelGCVerbose) {
4850         MutexLocker x(stats_lock());
4851         pss.print_termination_stats(worker_id);
4852       }
4853 
4854       assert(pss.refs()->is_empty(), "should be empty");
4855 
4856       // Close the inner scope so that the ResourceMark and HandleMark
4857       // destructors are executed here and are included as part of the
4858       // "GC Worker Time".
4859     }
4860 
4861     double end_time_ms = os::elapsedTime() * 1000.0;
4862     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4863   }
4864 };
4865 
4866 // *** Common G1 Evacuation Stuff
4867 
4868 // This method is run in a GC worker.
4869 
4870 void
4871 G1CollectedHeap::
4872 g1_process_strong_roots(bool is_scavenging,
4873                         ScanningOption so,
4874                         OopClosure* scan_non_heap_roots,


5377   uint _n_workers;
5378 
5379 public:
5380   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
5381     AbstractGangTask("ParPreserveCMReferents"),
5382     _g1h(g1h),
5383     _queues(task_queues),
5384     _terminator(workers, _queues),
5385     _n_workers(workers)
5386   { }
5387 
5388   void work(uint worker_id) {
5389     ResourceMark rm;
5390     HandleMark   hm;
5391 
5392     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5393     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5394 
5395     pss.set_evac_failure_closure(&evac_failure_cl);
5396 
5397     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5398 
5399 
5400     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5401     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5402 
5403     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5404     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5405 
5406     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5407     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5408 
5409     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5410       // We also need to mark copied objects.
5411       copy_non_heap_cl = &copy_mark_non_heap_cl;
5412       copy_metadata_cl = &copy_mark_metadata_cl;
5413     }
5414 
5415     // Is alive closure
5416     G1AlwaysAliveClosure always_alive(_g1h);
5417 
5418     // Copying keep alive closure. Applied to referent objects that need


5436 
5437       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5438       while (iter.has_next()) {
5439         // Since discovery is not atomic for the CM ref processor, we
5440         // can see some null referent objects.
5441         iter.load_ptrs(DEBUG_ONLY(true));
5442         oop ref = iter.obj();
5443 
5444         // This will filter nulls.
5445         if (iter.is_referent_alive()) {
5446           iter.make_referent_alive();
5447         }
5448         iter.move_to_next();
5449       }
5450     }
5451 
5452     // Drain the queue - which may cause stealing
5453     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5454     drain_queue.do_void();
5455     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5456     assert(pss.refs()->is_empty(), "should be");
5457   }
5458 };
5459 
5460 // Weak Reference processing during an evacuation pause (part 1).
5461 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5462   double ref_proc_start = os::elapsedTime();
5463 
5464   ReferenceProcessor* rp = _ref_processor_stw;
5465   assert(rp->discovery_enabled(), "should have been enabled");
5466 
5467   // Any reference objects, in the collection set, that were 'discovered'
5468   // by the CM ref processor should have already been copied (either by
5469   // applying the external root copy closure to the discovered lists, or
5470   // by following an RSet entry).
5471   //
5472   // But some of the referents, that are in the collection set, that these
5473   // reference objects point to may not have been copied: the STW ref
5474   // processor would have seen that the reference object had already
5475   // been 'discovered' and would have skipped discovering the reference,
5476   // but would not have treated the reference object as a regular oop.


5503   set_par_threads(0);
5504 
5505   // Closure to test whether a referent is alive.
5506   G1STWIsAliveClosure is_alive(this);
5507 
5508   // Even when parallel reference processing is enabled, the processing
5509   // of JNI refs is serial and performed serially by the current thread
5510   // rather than by a worker. The following PSS will be used for processing
5511   // JNI refs.
5512 
5513   // Use only a single queue for this PSS.
5514   G1ParScanThreadState            pss(this, 0, NULL);
5515 
5516   // We do not embed a reference processor in the copying/scanning
5517   // closures while we're actually processing the discovered
5518   // reference objects.
5519   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5520 
5521   pss.set_evac_failure_closure(&evac_failure_cl);
5522 
5523   assert(pss.refs()->is_empty(), "pre-condition");
5524 
5525   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5526   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5527 
5528   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5529   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5530 
5531   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5532   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5533 
5534   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5535     // We also need to mark copied objects.
5536     copy_non_heap_cl = &copy_mark_non_heap_cl;
5537     copy_metadata_cl = &copy_mark_metadata_cl;
5538   }
5539 
5540   // Keep alive closure.
5541   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5542 
5543   // Serial Complete GC closure


5555                                               NULL,
5556                                               _gc_timer_stw,
5557                                               _gc_tracer_stw->gc_id());
5558   } else {
5559     // Parallel reference processing
5560     assert(rp->num_q() == no_of_gc_workers, "sanity");
5561     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5562 
5563     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5564     stats = rp->process_discovered_references(&is_alive,
5565                                               &keep_alive,
5566                                               &drain_queue,
5567                                               &par_task_executor,
5568                                               _gc_timer_stw,
5569                                               _gc_tracer_stw->gc_id());
5570   }
5571 
5572   _gc_tracer_stw->report_gc_reference_stats(stats);
5573 
5574   // We have completed copying any necessary live referent objects.
5575   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5576 
5577   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5578   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5579 }
5580 
5581 // Weak Reference processing during an evacuation pause (part 2).
5582 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5583   double ref_enq_start = os::elapsedTime();
5584 
5585   ReferenceProcessor* rp = _ref_processor_stw;
5586   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5587 
5588   // Now enqueue any remaining on the discovered lists on to
5589   // the pending list.
5590   if (!rp->processing_is_mt()) {
5591     // Serial reference processing...
5592     rp->enqueue_discovered_references();
5593   } else {
5594     // Parallel reference enqueueing
5595 




4672                                 RefToScanQueueSet* queues,
4673                                 ParallelTaskTerminator* terminator)
4674     : _g1h(g1h), _par_scan_state(par_scan_state),
4675       _queues(queues), _terminator(terminator) {}
4676 
4677   void do_void();
4678 
4679 private:
4680   inline bool offer_termination();
4681 };
4682 
4683 bool G1ParEvacuateFollowersClosure::offer_termination() {
4684   G1ParScanThreadState* const pss = par_scan_state();
4685   pss->start_term_time();
4686   const bool res = terminator()->offer_termination();
4687   pss->end_term_time();
4688   return res;
4689 }
4690 
4691 void G1ParEvacuateFollowersClosure::do_void() {

4692   G1ParScanThreadState* const pss = par_scan_state();
4693   pss->trim_queue();

4694   do {
4695     pss->steal_and_trim_queue(queues());












4696   } while (!offer_termination());
4697 }
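
For context, the stealing loop removed from do_void() above (steal a task, handle the narrow/regular oop cases, then trim the local queue) has moved into G1ParScanThreadState as steal_and_trim_queue(). A plausible sketch of that helper, reconstructed from the removed lines rather than taken from the actual g1ParScanThreadState files:

void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet* task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    if (stolen_task.is_narrow()) {
      deal_with_reference((narrowOop*) stolen_task);
    } else {
      deal_with_reference((oop*) stolen_task);
    }
    // Processing a stolen reference may have pushed new entries onto the
    // local queue, so drain it before attempting another steal.
    trim_queue();
  }
}
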
4698 
4699 class G1KlassScanClosure : public KlassClosure {
4700  G1ParCopyHelper* _closure;
4701  bool             _process_only_dirty;
4702  int              _count;
4703  public:
4704   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4705       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4706   void do_klass(Klass* klass) {
4707     // If the klass has not been dirtied we know that there are
4708     // no references into the young gen and we can skip it.
4709     if (!_process_only_dirty || klass->has_modified_oops()) {
4710       // Clean the klass since we're going to scavenge all the metadata.
4711       klass->clear_modified_oops();
4712 
4713       // Tell the closure that this klass is the Klass to scavenge
4714       // and is the one to dirty if oops are left pointing into the young gen.
4715       _closure->set_scanned_klass(klass);


4721     _count++;
4722   }
4723 };
4724 
4725 class G1ParTask : public AbstractGangTask {
4726 protected:
4727   G1CollectedHeap*       _g1h;
4728   RefToScanQueueSet      *_queues;
4729   ParallelTaskTerminator _terminator;
4730   uint _n_workers;
4731 
4732   Mutex _stats_lock;
4733   Mutex* stats_lock() { return &_stats_lock; }
4734 
4735   size_t getNCards() {
4736     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4737       / G1BlockOffsetSharedArray::N_bytes;
4738   }
4739 
4740 public:
4741   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)

4742     : AbstractGangTask("G1 collection"),
4743       _g1h(g1h),
4744       _queues(task_queues),
4745       _terminator(0, _queues),
4746       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4747   {}
4748 
4749   RefToScanQueueSet* queues() { return _queues; }
4750 
4751   RefToScanQueue *work_queue(int i) {
4752     return queues()->queue(i);
4753   }
4754 
4755   ParallelTaskTerminator* terminator() { return &_terminator; }
4756 
4757   virtual void set_for_termination(int active_workers) {
4758     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4759     // in the young space (_par_seq_tasks) in the G1 heap
4760     // for SequentialSubTasksDone.
4761     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap


4819                                     worker_id);
4820       pss.end_strong_roots();
4821 
4822       {
4823         double start = os::elapsedTime();
4824         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4825         evac.do_void();
4826         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4827         double term_ms = pss.term_time()*1000.0;
4828         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4829         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4830       }
4831       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4832       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4833 
4834       if (ParallelGCVerbose) {
4835         MutexLocker x(stats_lock());
4836         pss.print_termination_stats(worker_id);
4837       }
4838 
4839       assert(pss.queue_is_empty(), "should be empty");
4840 
4841       // Close the inner scope so that the ResourceMark and HandleMark
4842       // destructors are executed here and are included as part of the
4843       // "GC Worker Time".
4844     }
4845 
4846     double end_time_ms = os::elapsedTime() * 1000.0;
4847     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4848   }
4849 };
4850 
4851 // *** Common G1 Evacuation Stuff
4852 
4853 // This method is run in a GC worker.
4854 
4855 void
4856 G1CollectedHeap::
4857 g1_process_strong_roots(bool is_scavenging,
4858                         ScanningOption so,
4859                         OopClosure* scan_non_heap_roots,


5362   uint _n_workers;
5363 
5364 public:
5365   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
5366     AbstractGangTask("ParPreserveCMReferents"),
5367     _g1h(g1h),
5368     _queues(task_queues),
5369     _terminator(workers, _queues),
5370     _n_workers(workers)
5371   { }
5372 
5373   void work(uint worker_id) {
5374     ResourceMark rm;
5375     HandleMark   hm;
5376 
5377     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5378     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5379 
5380     pss.set_evac_failure_closure(&evac_failure_cl);
5381 
5382     assert(pss.queue_is_empty(), "both queue and overflow should be empty");

5383 
5384     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5385     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5386 
5387     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5388     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5389 
5390     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5391     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5392 
5393     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5394       // We also need to mark copied objects.
5395       copy_non_heap_cl = &copy_mark_non_heap_cl;
5396       copy_metadata_cl = &copy_mark_metadata_cl;
5397     }
5398 
5399     // Is alive closure
5400     G1AlwaysAliveClosure always_alive(_g1h);
5401 
5402     // Copying keep alive closure. Applied to referent objects that need


5420 
5421       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5422       while (iter.has_next()) {
5423         // Since discovery is not atomic for the CM ref processor, we
5424         // can see some null referent objects.
5425         iter.load_ptrs(DEBUG_ONLY(true));
5426         oop ref = iter.obj();
5427 
5428         // This will filter nulls.
5429         if (iter.is_referent_alive()) {
5430           iter.make_referent_alive();
5431         }
5432         iter.move_to_next();
5433       }
5434     }
5435 
5436     // Drain the queue - which may cause stealing
5437     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5438     drain_queue.do_void();
5439     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5440     assert(pss.queue_is_empty(), "should be");
5441   }
5442 };
5443 
5444 // Weak Reference processing during an evacuation pause (part 1).
5445 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5446   double ref_proc_start = os::elapsedTime();
5447 
5448   ReferenceProcessor* rp = _ref_processor_stw;
5449   assert(rp->discovery_enabled(), "should have been enabled");
5450 
5451   // Any reference objects, in the collection set, that were 'discovered'
5452   // by the CM ref processor should have already been copied (either by
5453   // applying the external root copy closure to the discovered lists, or
5454   // by following an RSet entry).
5455   //
5456   // But some of the referents, that are in the collection set, that these
5457   // reference objects point to may not have been copied: the STW ref
5458   // processor would have seen that the reference object had already
5459   // been 'discovered' and would have skipped discovering the reference,
5460   // but would not have treated the reference object as a regular oop.


5487   set_par_threads(0);
5488 
5489   // Closure to test whether a referent is alive.
5490   G1STWIsAliveClosure is_alive(this);
5491 
5492   // Even when parallel reference processing is enabled, the processing
5493   // of JNI refs is serial and performed serially by the current thread
5494   // rather than by a worker. The following PSS will be used for processing
5495   // JNI refs.
5496 
5497   // Use only a single queue for this PSS.
5498   G1ParScanThreadState            pss(this, 0, NULL);
5499 
5500   // We do not embed a reference processor in the copying/scanning
5501   // closures while we're actually processing the discovered
5502   // reference objects.
5503   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5504 
5505   pss.set_evac_failure_closure(&evac_failure_cl);
5506 
5507   assert(pss.queue_is_empty(), "pre-condition");
5508 
5509   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5510   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5511 
5512   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5513   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5514 
5515   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5516   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5517 
5518   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5519     // We also need to mark copied objects.
5520     copy_non_heap_cl = &copy_mark_non_heap_cl;
5521     copy_metadata_cl = &copy_mark_metadata_cl;
5522   }
5523 
5524   // Keep alive closure.
5525   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5526 
5527   // Serial Complete GC closure


5539                                               NULL,
5540                                               _gc_timer_stw,
5541                                               _gc_tracer_stw->gc_id());
5542   } else {
5543     // Parallel reference processing
5544     assert(rp->num_q() == no_of_gc_workers, "sanity");
5545     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5546 
5547     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5548     stats = rp->process_discovered_references(&is_alive,
5549                                               &keep_alive,
5550                                               &drain_queue,
5551                                               &par_task_executor,
5552                                               _gc_timer_stw,
5553                                               _gc_tracer_stw->gc_id());
5554   }
5555 
5556   _gc_tracer_stw->report_gc_reference_stats(stats);
5557 
5558   // We have completed copying any necessary live referent objects.
5559   assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5560 
5561   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5562   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5563 }
5564 
5565 // Weak Reference processing during an evacuation pause (part 2).
5566 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5567   double ref_enq_start = os::elapsedTime();
5568 
5569   ReferenceProcessor* rp = _ref_processor_stw;
5570   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5571 
5572   // Now enqueue any remaining on the discovered lists on to
5573   // the pending list.
5574   if (!rp->processing_is_mt()) {
5575     // Serial reference processing...
5576     rp->enqueue_discovered_references();
5577   } else {
5578     // Parallel reference enqueueing
5579