src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

5343   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5344   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5345   void do_oop(      oop* p) {
5346     oop obj = *p;
5347 
5348     if (_g1->obj_in_cs(obj)) {
5349       assert( obj->is_forwarded(), "invariant" );
5350       *p = obj->forwardee();
5351     }
5352   }
5353 };
5354 
5355 // Copying Keep Alive closure - can be called from both
5356 // serial and parallel code as long as different worker
5357 // threads utilize different G1ParScanThreadState instances
5358 // and different queues.
5359 
5360 class G1CopyingKeepAliveClosure: public OopClosure {
5361   G1CollectedHeap*         _g1h;
5362   OopClosure*              _copy_non_heap_obj_cl;
5363   OopsInHeapRegionClosure* _copy_metadata_obj_cl;
5364   G1ParScanThreadState*    _par_scan_state;
5365 
5366 public:
5367   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5368                             OopClosure* non_heap_obj_cl,
5369                             OopsInHeapRegionClosure* metadata_obj_cl,
5370                             G1ParScanThreadState* pss):
5371     _g1h(g1h),
5372     _copy_non_heap_obj_cl(non_heap_obj_cl),
5373     _copy_metadata_obj_cl(metadata_obj_cl),
5374     _par_scan_state(pss)
5375   {}
5376 
5377   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5378   virtual void do_oop(      oop* p) { do_oop_work(p); }
5379 
5380   template <class T> void do_oop_work(T* p) {
5381     oop obj = oopDesc::load_decode_heap_oop(p);
5382 
5383     if (_g1h->obj_in_cs(obj)) {
5384       // If the referent object has been forwarded (either copied
5385       // to a new location or to itself in the event of an
5386       // evacuation failure) then we need to update the reference
5387       // field and, if both reference and referent are in the G1
5388       // heap, update the RSet for the referent.
5389       //
5390       // If the referent has not been forwarded then we have to keep
5391       // it alive by policy. Therefore we have to copy the referent.
5392       //
5393       // If the reference field is in the G1 heap then we can push
5394       // on the PSS queue. When the queue is drained (after each
5395       // phase of reference processing) the object and its followers
5396       // will be copied, the reference field set to point to the
5397       // new location, and the RSet updated. Otherwise we need to
5398       // use the non-heap or metadata closures directly to copy
5399       // the referent object and update the pointer, while avoiding
5400       // updating the RSet.
5401 
5402       if (_g1h->is_in_g1_reserved(p)) {
5403         _par_scan_state->push_on_queue(p);
5404       } else {
5405         assert(!Metaspace::contains((const void*)p),
5406                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5407                        PTR_FORMAT, p));
5408         _copy_non_heap_obj_cl->do_oop(p);
5409       }
5410     }
5411   }
5412 };
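
The closure above is one side of a three-part contract. As a rough, compilable sketch with invented names (ToyRefProcessor is not a HotSpot type), this is the shape of the protocol the reference processor drives: is_alive tests each referent, keep_alive is applied to the fields of referents that must be retained (and, as in the closure above, may only queue work rather than copy immediately), and complete_gc drains whatever was queued.

    #include <cstddef>
    #include <vector>

    // Toy model only -- the real interfaces live in referenceProcessor.hpp.
    struct ToyRefProcessor {
      std::vector<int*> discovered_fields;   // stand-in for the discovered ref lists

      void process(bool (*is_alive)(int),
                   void (*keep_alive)(int*),
                   void (*complete_gc)()) {
        for (size_t i = 0; i < discovered_fields.size(); i++) {
          int* field = discovered_fields[i];
          if (!is_alive(*field)) {
            keep_alive(field);   // analogue of G1CopyingKeepAliveClosure::do_oop
          }
        }
        complete_gc();           // analogue of G1STWDrainQueueClosure or
      }                          // G1ParEvacuateFollowersClosure
    };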
5413 
5414 // Serial drain queue closure. Called as the 'complete_gc'
5415 // closure for each discovered list in some of the
5416 // reference processing phases.
5417 
5418 class G1STWDrainQueueClosure: public VoidClosure {
5419 protected:
5420   G1CollectedHeap* _g1h;
5421   G1ParScanThreadState* _par_scan_state;
5422 
5423   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
5424 
5425 public:
5426   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :


5481     AbstractGangTask("Process reference objects in parallel"),
5482     _proc_task(proc_task),
5483     _g1h(g1h),
5484     _task_queues(task_queues),
5485     _terminator(terminator)
5486   {}
5487 
5488   virtual void work(uint worker_id) {
5489     // The reference processing task executed by a single worker.
5490     ResourceMark rm;
5491     HandleMark   hm;
5492 
5493     G1STWIsAliveClosure is_alive(_g1h);
5494 
5495     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5496     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5497 
5498     pss.set_evac_failure_closure(&evac_failure_cl);
5499 
5500     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5501     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5502 
5503     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5504     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5505 
5506     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5507     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5508 
5509     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5510       // We also need to mark copied objects.
5511       copy_non_heap_cl = &copy_mark_non_heap_cl;
5512       copy_metadata_cl = &copy_mark_metadata_cl;
5513     }
5514 
5515     // Keep alive closure.
5516     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5517 
5518     // Complete GC closure
5519     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5520 
5521     // Call the reference processing task's work routine.
5522     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5523 
5524     // Note we cannot assert that the refs array is empty here as not all
5525     // of the processing tasks (specifically phase2 - pp2_work) execute
5526     // the complete_gc closure (which ordinarily would drain the queue) so
5527     // the queue may not be empty.
5528   }
5529 };
5530 
5531 // Driver routine for parallel reference processing.
5532 // Creates an instance of the ref processing gang
5533 // task and has the worker threads execute it.
5534 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5535   assert(_workers != NULL, "Need parallel worker threads.");
5536 


5591     AbstractGangTask("ParPreserveCMReferents"),
5592     _g1h(g1h),
5593     _queues(task_queues),
5594     _terminator(workers, _queues),
5595     _n_workers(workers)
5596   { }
5597 
5598   void work(uint worker_id) {
5599     ResourceMark rm;
5600     HandleMark   hm;
5601 
5602     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5603     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5604 
5605     pss.set_evac_failure_closure(&evac_failure_cl);
5606 
5607     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5608 
5609 
5610     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5611     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5612 
5613     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5614     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5615 
5616     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5617     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5618 
5619     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5620       // We also need to mark copied objects.
5621       copy_non_heap_cl = &copy_mark_non_heap_cl;
5622       copy_metadata_cl = &copy_mark_metadata_cl;
5623     }
5624 
5625     // Is alive closure
5626     G1AlwaysAliveClosure always_alive(_g1h);
5627 
5628     // Copying keep alive closure. Applied to referent objects that need
5629     // to be copied.
5630     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5631 
5632     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5633 
5634     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5635     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5636 
5637     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5638     // So this must be true - but assert just in case someone decides to
5639     // change the worker ids.
5640     assert(0 <= worker_id && worker_id < limit, "sanity");
5641     assert(!rp->discovery_is_atomic(), "check this code");
5642 
5643     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5644     for (uint idx = worker_id; idx < limit; idx += stride) {
5645       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5646 
5647       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5648       while (iter.has_next()) {
5649         // Since discovery is not atomic for the CM ref processor, we
5650         // can see some null referent objects.
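
To make the limit/stride arithmetic above concrete, here is a small standalone program. The inputs are assumptions, not values taken from this file: number_of_subclasses_of_ref() is taken to be 4 (soft, weak, final, phantom) and max_num_q() to be 4 (as if ParallelGCThreads == 4). With those inputs limit is 16, stride is 4, and worker 2 walks lists 2, 6, 10 and 14.

    #include <cstdio>

    int main() {
      const unsigned n_workers          = 4;  // assumed gang size
      const unsigned num_ref_subclasses = 4;  // assumption: soft, weak, final, phantom
      const unsigned max_num_q          = 4;  // assumption: ParallelGCThreads == 4

      const unsigned limit  = num_ref_subclasses * max_num_q;  // 16 discovered lists
      unsigned stride = n_workers > 1 ? n_workers : 1;         // MAX2(_n_workers, 1U)
      if (stride > limit) stride = limit;                      // MIN2(..., limit)

      for (unsigned worker_id = 0; worker_id < n_workers; worker_id++) {
        std::printf("worker %u claims lists:", worker_id);
        for (unsigned idx = worker_id; idx < limit; idx += stride) {
          std::printf(" %u", idx);   // e.g. worker 2 -> 2 6 10 14
        }
        std::printf("\n");
      }
      return 0;
    }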


5716   G1STWIsAliveClosure is_alive(this);
5717 
5718   // Even when parallel reference processing is enabled, the processing
5719   // of JNI refs is performed serially by the current thread
5720   // rather than by a worker. The following PSS will be used for processing
5721   // JNI refs.
5722 
5723   // Use only a single queue for this PSS.
5724   G1ParScanThreadState            pss(this, 0, NULL);
5725 
5726   // We do not embed a reference processor in the copying/scanning
5727   // closures while we're actually processing the discovered
5728   // reference objects.
5729   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5730 
5731   pss.set_evac_failure_closure(&evac_failure_cl);
5732 
5733   assert(pss.refs()->is_empty(), "pre-condition");
5734 
5735   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5736   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5737 
5738   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5739   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5740 
5741   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5742   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5743 
5744   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5745     // We also need to mark copied objects.
5746     copy_non_heap_cl = &copy_mark_non_heap_cl;
5747     copy_metadata_cl = &copy_mark_metadata_cl;
5748   }
5749 
5750   // Keep alive closure.
5751   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5752 
5753   // Serial Complete GC closure
5754   G1STWDrainQueueClosure drain_queue(this, &pss);
5755 
5756   // Set up the soft refs policy...
5757   rp->setup_policy(false);
5758 
5759   ReferenceProcessorStats stats;
5760   if (!rp->processing_is_mt()) {
5761     // Serial reference processing...
5762     stats = rp->process_discovered_references(&is_alive,
5763                                               &keep_alive,
5764                                               &drain_queue,
5765                                               NULL,
5766                                               _gc_timer_stw);
5767   } else {
5768     // Parallel reference processing
5769     assert(rp->num_q() == no_of_gc_workers, "sanity");
5770     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5771 


The same region of the file appears again below as it stands after the change: G1CopyingKeepAliveClosure no longer takes a separate OopsInHeapRegionClosure for metadata, so the _copy_metadata_obj_cl field, the metadata_obj_cl constructor argument, and the copy_metadata_cl plumbing at each call site are gone, the assert message no longer refers to _copy_metadata_obj_cl, and the line numbers of later code drop accordingly.

5343   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5344   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5345   void do_oop(      oop* p) {
5346     oop obj = *p;
5347 
5348     if (_g1->obj_in_cs(obj)) {
5349       assert( obj->is_forwarded(), "invariant" );
5350       *p = obj->forwardee();
5351     }
5352   }
5353 };
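
A minimal standalone illustration of the fixup this closure performs, using an invented ToyObj in place of HotSpot's oopDesc: an object in the collection set must already carry a forwarding pointer (to its copy, or to itself after an evacuation failure), and the reference field is simply redirected through it.

    #include <cassert>
    #include <cstddef>

    // ToyObj stands in for a heap object with a forwarding pointer.
    struct ToyObj {
      ToyObj* forwardee;                       // NULL until the object is forwarded
      bool is_forwarded() const { return forwardee != NULL; }
    };

    // Mirrors the body of G1KeepAliveClosure::do_oop(oop*).
    void keep_alive_fixup(ToyObj** p, bool obj_in_cs) {
      if (obj_in_cs) {
        ToyObj* obj = *p;
        assert(obj->is_forwarded() && "invariant");
        *p = obj->forwardee;                   // like '*p = obj->forwardee();'
      }
    }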
5354 
5355 // Copying Keep Alive closure - can be called from both
5356 // serial and parallel code as long as different worker
5357 // threads utilize different G1ParScanThreadState instances
5358 // and different queues.
5359 
5360 class G1CopyingKeepAliveClosure: public OopClosure {
5361   G1CollectedHeap*         _g1h;
5362   OopClosure*              _copy_non_heap_obj_cl;

5363   G1ParScanThreadState*    _par_scan_state;
5364 
5365 public:
5366   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5367                             OopClosure* non_heap_obj_cl,

5368                             G1ParScanThreadState* pss):
5369     _g1h(g1h),
5370     _copy_non_heap_obj_cl(non_heap_obj_cl),

5371     _par_scan_state(pss)
5372   {}
5373 
5374   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5375   virtual void do_oop(      oop* p) { do_oop_work(p); }
5376 
5377   template <class T> void do_oop_work(T* p) {
5378     oop obj = oopDesc::load_decode_heap_oop(p);
5379 
5380     if (_g1h->obj_in_cs(obj)) {
5381       // If the referent object has been forwarded (either copied
5382       // to a new location or to itself in the event of an
5383       // evacuation failure) then we need to update the reference
5384       // field and, if both reference and referent are in the G1
5385       // heap, update the RSet for the referent.
5386       //
5387       // If the referent has not been forwarded then we have to keep
5388       // it alive by policy. Therefore we have to copy the referent.
5389       //
5390       // If the reference field is in the G1 heap then we can push
5391       // on the PSS queue. When the queue is drained (after each
5392       // phase of reference processing) the object and its followers
5393       // will be copied, the reference field set to point to the
5394       // new location, and the RSet updated. Otherwise we need to
5395       // use the non-heap closure directly to copy
5396       // the referent object and update the pointer, while avoiding
5397       // updating the RSet.
5398 
5399       if (_g1h->is_in_g1_reserved(p)) {
5400         _par_scan_state->push_on_queue(p);
5401       } else {
5402         assert(!Metaspace::contains((const void*)p),
5403                err_msg("Unexpectedly found a pointer from metadata: "
5404                        PTR_FORMAT, p));
5405         _copy_non_heap_obj_cl->do_oop(p);
5406       }
5407     }
5408   }
5409 };
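
The two do_oop overloads funneling into one do_oop_work template is a recurring HotSpot idiom worth isolating. Below is a self-contained sketch with stand-in types (toy_oop and toy_narrowOop are inventions, not the real definitions): because member templates cannot themselves be virtual, the virtual overloads dispatch into a non-virtual template so that compressed and full-width reference fields share a single body.

    #include <cstdint>

    typedef uintptr_t toy_oop;        // stand-in for HotSpot's 'oop'
    typedef uint32_t  toy_narrowOop;  // stand-in for 'narrowOop'

    class ToyOopClosure {
    public:
      virtual void do_oop(toy_oop* p)       { do_oop_work(p); }
      virtual void do_oop(toy_narrowOop* p) { do_oop_work(p); }
      virtual ~ToyOopClosure() {}
    private:
      template <class T> void do_oop_work(T* p) {
        // A real closure would decode *p here; HotSpot's
        // oopDesc::load_decode_heap_oop(p) decompresses a narrowOop and
        // loads a full-width oop unchanged, so one template body serves
        // both field widths.
        (void)p;
      }
    };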
5410 
5411 // Serial drain queue closure. Called as the 'complete_gc'
5412 // closure for each discovered list in some of the
5413 // reference processing phases.
5414 
5415 class G1STWDrainQueueClosure: public VoidClosure {
5416 protected:
5417   G1CollectedHeap* _g1h;
5418   G1ParScanThreadState* _par_scan_state;
5419 
5420   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
5421 
5422 public:
5423   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :


5478     AbstractGangTask("Process reference objects in parallel"),
5479     _proc_task(proc_task),
5480     _g1h(g1h),
5481     _task_queues(task_queues),
5482     _terminator(terminator)
5483   {}
5484 
5485   virtual void work(uint worker_id) {
5486     // The reference processing task executed by a single worker.
5487     ResourceMark rm;
5488     HandleMark   hm;
5489 
5490     G1STWIsAliveClosure is_alive(_g1h);
5491 
5492     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5493     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5494 
5495     pss.set_evac_failure_closure(&evac_failure_cl);
5496 
5497     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);

5498 
5499     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);

5500 
5501     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;

5502 
5503     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5504       // We also need to mark copied objects.
5505       copy_non_heap_cl = &copy_mark_non_heap_cl;

5506     }
5507 
5508     // Keep alive closure.
5509     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5510 
5511     // Complete GC closure
5512     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5513 
5514     // Call the reference processing task's work routine.
5515     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5516 
5517     // Note we cannot assert that the refs array is empty here as not all
5518     // of the processing tasks (specifically phase2 - pp2_work) execute
5519     // the complete_gc closure (which ordinarily would drain the queue) so
5520     // the queue may not be empty.
5521   }
5522 };
5523 
5524 // Driver routine for parallel reference processing.
5525 // Creates an instance of the ref processing gang
5526 // task and has the worker threads execute it.
5527 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5528   assert(_workers != NULL, "Need parallel worker threads.");
5529 


5584     AbstractGangTask("ParPreserveCMReferents"),
5585     _g1h(g1h),
5586     _queues(task_queues),
5587     _terminator(workers, _queues),
5588     _n_workers(workers)
5589   { }
5590 
5591   void work(uint worker_id) {
5592     ResourceMark rm;
5593     HandleMark   hm;
5594 
5595     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5596     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5597 
5598     pss.set_evac_failure_closure(&evac_failure_cl);
5599 
5600     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5601 
5602 
5603     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);

5604 
5605     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);

5606 
5607     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;

5608 
5609     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5610       // We also need to mark copied objects.
5611       copy_non_heap_cl = &copy_mark_non_heap_cl;

5612     }
5613 
5614     // Is alive closure
5615     G1AlwaysAliveClosure always_alive(_g1h);
5616 
5617     // Copying keep alive closure. Applied to referent objects that need
5618     // to be copied.
5619     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5620 
5621     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5622 
5623     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5624     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5625 
5626     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5627     // So this must be true - but assert just in case someone decides to
5628     // change the worker ids.
5629     assert(0 <= worker_id && worker_id < limit, "sanity");
5630     assert(!rp->discovery_is_atomic(), "check this code");
5631 
5632     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5633     for (uint idx = worker_id; idx < limit; idx += stride) {
5634       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5635 
5636       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5637       while (iter.has_next()) {
5638         // Since discovery is not atomic for the CM ref processor, we
5639         // can see some null referent objects.


5705   G1STWIsAliveClosure is_alive(this);
5706 
5707   // Even when parallel reference processing is enabled, the processing
5708   // of JNI refs is performed serially by the current thread
5709   // rather than by a worker. The following PSS will be used for processing
5710   // JNI refs.
5711 
5712   // Use only a single queue for this PSS.
5713   G1ParScanThreadState            pss(this, 0, NULL);
5714 
5715   // We do not embed a reference processor in the copying/scanning
5716   // closures while we're actually processing the discovered
5717   // reference objects.
5718   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5719 
5720   pss.set_evac_failure_closure(&evac_failure_cl);
5721 
5722   assert(pss.refs()->is_empty(), "pre-condition");
5723 
5724   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);

5725 
5726   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);

5727 
5728   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;

5729 
5730   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5731     // We also need to mark copied objects.
5732     copy_non_heap_cl = &copy_mark_non_heap_cl;

5733   }
5734 
5735   // Keep alive closure.
5736   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5737 
5738   // Serial Complete GC closure
5739   G1STWDrainQueueClosure drain_queue(this, &pss);
5740 
5741   // Set up the soft refs policy...
5742   rp->setup_policy(false);
5743 
5744   ReferenceProcessorStats stats;
5745   if (!rp->processing_is_mt()) {
5746     // Serial reference processing...
5747     stats = rp->process_discovered_references(&is_alive,
5748                                               &keep_alive,
5749                                               &drain_queue,
5750                                               NULL,
5751                                               _gc_timer_stw);
5752   } else {
5753     // Parallel reference processing
5754     assert(rp->num_q() == no_of_gc_workers, "sanity");
5755     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5756