
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 49518 : imported patch 8200385-prev-bitmap-marks-left
rev 49525 : [mq]: 8200426-sangheon-review


3624   double redirty_logged_cards_start = os::elapsedTime();
3625 
3626   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3627   dirty_card_queue_set().reset_for_par_iteration();
3628   workers()->run_task(&redirty_task);
3629 
3630   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
3631   dcq.merge_bufferlists(&dirty_card_queue_set());
3632   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3633 
3634   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3635 }
3636 
3637 // Weak Reference Processing support
3638 
3639 // An always "is_alive" closure that is used to preserve referents.
3640 // If the object is non-null then it's alive.  Used in the preservation
3641 // of referent objects that are pointed to by reference objects
3642 // discovered by the CM ref processor.
3643 class G1AlwaysAliveClosure: public BoolObjectClosure {
3644   G1CollectedHeap* _g1;
3645 public:
3646   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3647   bool do_object_b(oop p) {
3648     if (p != NULL) {
3649       return true;
3650     }
3651     return false;
3652   }
3653 };
3654 
3655 bool G1STWIsAliveClosure::do_object_b(oop p) {
3656   // An object is reachable if it is outside the collection set,
3657   // or is inside and copied.
3658   return !_g1->is_in_cset(p) || p->is_forwarded();
3659 }
3660 
3661 // Non Copying Keep Alive closure
3662 class G1KeepAliveClosure: public OopClosure {
3663   G1CollectedHeap* _g1;
3664 public:
3665   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3666   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3667   void do_oop(oop* p) {
3668     oop obj = *p;
3669     assert(obj != NULL, "the caller should have filtered out NULL values");
3670 
3671     const InCSetState cset_state = _g1->in_cset_state(obj);
3672     if (!cset_state.is_in_cset_or_humongous()) {
3673       return;
3674     }
3675     if (cset_state.is_in_cset()) {
3676       assert( obj->is_forwarded(), "invariant" );
3677       *p = obj->forwardee();
3678     } else {
3679       assert(!obj->is_forwarded(), "invariant" );
3680       assert(cset_state.is_humongous(),
3681              "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
3682       _g1->set_humongous_is_live(obj);
3683     }
3684   }
3685 };
3686 
3687 // Copying Keep Alive closure - can be called from both
3688 // serial and parallel code as long as different worker
3689 // threads utilize different G1ParScanThreadState instances
3690 // and different queues.
3691 
3692 class G1CopyingKeepAliveClosure: public OopClosure {
3693   G1CollectedHeap*         _g1h;
3694   OopClosure*              _copy_non_heap_obj_cl;
3695   G1ParScanThreadState*    _par_scan_state;
3696 
3697 public:
3698   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3699                             OopClosure* non_heap_obj_cl,
3700                             G1ParScanThreadState* pss):
3701     _g1h(g1h),
3702     _copy_non_heap_obj_cl(non_heap_obj_cl),


3906     _g1h(g1h),
3907     _pss(per_thread_states),
3908     _queues(task_queues),
3909     _terminator(workers, _queues),
3910     _n_workers(workers)
3911   {
3912     g1h->ref_processor_cm()->set_active_mt_degree(workers);
3913   }
3914 
3915   void work(uint worker_id) {
3916     G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
3917 
3918     ResourceMark rm;
3919     HandleMark   hm;
3920 
3921     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
3922     pss->set_ref_processor(NULL);
3923     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3924 
3925     // Is alive closure
3926     G1AlwaysAliveClosure always_alive(_g1h);
3927 
3928     // Copying keep alive closure. Applied to referent objects that need
3929     // to be copied.
3930     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
3931 
3932     ReferenceProcessor* rp = _g1h->ref_processor_cm();
3933 
3934     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
3935     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
3936 
3937     // limit is set using max_num_q() - which was set using ParallelGCThreads.
3938     // So this must be true - but assert just in case someone decides to
3939     // change the worker ids.
3940     assert(worker_id < limit, "sanity");
3941     assert(!rp->discovery_is_atomic(), "check this code");
3942 
3943     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
3944     for (uint idx = worker_id; idx < limit; idx += stride) {
3945       DiscoveredList& ref_list = rp->discovered_refs()[idx];
3946 




3624   double redirty_logged_cards_start = os::elapsedTime();
3625 
3626   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3627   dirty_card_queue_set().reset_for_par_iteration();
3628   workers()->run_task(&redirty_task);
3629 
3630   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
3631   dcq.merge_bufferlists(&dirty_card_queue_set());
3632   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3633 
3634   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3635 }
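
The merge above hands ownership of any remaining completed buffers from the per-collection queue set to the global JavaThread set, which is why the follow-up assert can demand that the source ends up empty. A toy sketch of that merge-then-assert invariant (plain C++ stand-ins, not the real DirtyCardQueueSet API):

#include <cassert>
#include <deque>

struct ToyQueueSet {
  std::deque<int> completed;                 // stand-in for completed buffers
  size_t completed_buffers_num() const { return completed.size(); }
  void merge_from(ToyQueueSet& src) {
    // Splice everything over; src relinquishes ownership of its buffers.
    completed.insert(completed.end(), src.completed.begin(), src.completed.end());
    src.completed.clear();
  }
};

int main() {
  ToyQueueSet global, local;
  local.completed = {1, 2, 3};
  global.merge_from(local);
  assert(local.completed_buffers_num() == 0); // "All should be consumed"
  assert(global.completed_buffers_num() == 3);
  return 0;
}
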
3636 
3637 // Weak Reference Processing support
3638 
3639 // An always "is_alive" closure that is used to preserve referents.
3640 // If the object is non-null then it's alive.  Used in the preservation
3641 // of referent objects that are pointed to by reference objects
3642 // discovered by the CM ref processor.
3643 class G1AlwaysAliveClosure: public BoolObjectClosure {

3644 public:

3645   bool do_object_b(oop p) {
3646     if (p != NULL) {
3647       return true;
3648     }
3649     return false;
3650   }
3651 };
3652 
3653 bool G1STWIsAliveClosure::do_object_b(oop p) {
3654   // An object is reachable if it is outside the collection set,
3655   // or is inside and copied.
3656   return !_g1h->is_in_cset(p) || p->is_forwarded();
3657 }
3658 
3659 // Non Copying Keep Alive closure
3660 class G1KeepAliveClosure: public OopClosure {
3661   G1CollectedHeap* _g1h;
3662 public:
3663   G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
3664   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3665   void do_oop(oop* p) {
3666     oop obj = *p;
3667     assert(obj != NULL, "the caller should have filtered out NULL values");
3668 
3669     const InCSetState cset_state = _g1h->in_cset_state(obj);
3670     if (!cset_state.is_in_cset_or_humongous()) {
3671       return;
3672     }
3673     if (cset_state.is_in_cset()) {
3674       assert( obj->is_forwarded(), "invariant" );
3675       *p = obj->forwardee();
3676     } else {
3677       assert(!obj->is_forwarded(), "invariant" );
3678       assert(cset_state.is_humongous(),
3679              "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
3680       _g1h->set_humongous_is_live(obj);
3681     }
3682   }
3683 };
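
To make the non-copying fix-up concrete: an in-cset referent has already been evacuated, so the slot is swung to its forwardee, while a humongous referent is never copied and is only marked live in place. A self-contained toy analogy of those two branches (hypothetical types, not HotSpot's oop machinery):

#include <cassert>
#include <cstdio>

struct Obj {
  Obj* forwardee = nullptr;            // set once the object has been evacuated
  bool humongous_live = false;
  bool is_forwarded() const { return forwardee != nullptr; }
};

// Mirrors the shape of G1KeepAliveClosure::do_oop(oop*) above.
void keep_alive(Obj** p, bool in_cset) {
  Obj* obj = *p;
  if (in_cset) {
    assert(obj->is_forwarded());
    *p = obj->forwardee;               // swing the slot to the evacuated copy
  } else {
    obj->humongous_live = true;        // humongous: mark live, no copy
  }
}

int main() {
  Obj old_copy, new_copy, humongous;
  old_copy.forwardee = &new_copy;
  Obj* slot = &old_copy;
  keep_alive(&slot, /*in_cset=*/true);
  assert(slot == &new_copy);           // slot now points at the new location
  Obj* hslot = &humongous;
  keep_alive(&hslot, /*in_cset=*/false);
  assert(humongous.humongous_live && hslot == &humongous);
  printf("slot updated to forwardee; humongous marked live\n");
  return 0;
}
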
3684 
3685 // Copying Keep Alive closure - can be called from both
3686 // serial and parallel code as long as different worker
3687 // threads utilize different G1ParScanThreadState instances
3688 // and different queues.
3689 
3690 class G1CopyingKeepAliveClosure: public OopClosure {
3691   G1CollectedHeap*         _g1h;
3692   OopClosure*              _copy_non_heap_obj_cl;
3693   G1ParScanThreadState*    _par_scan_state;
3694 
3695 public:
3696   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3697                             OopClosure* non_heap_obj_cl,
3698                             G1ParScanThreadState* pss):
3699     _g1h(g1h),
3700     _copy_non_heap_obj_cl(non_heap_obj_cl),


3904     _g1h(g1h),
3905     _pss(per_thread_states),
3906     _queues(task_queues),
3907     _terminator(workers, _queues),
3908     _n_workers(workers)
3909   {
3910     g1h->ref_processor_cm()->set_active_mt_degree(workers);
3911   }
3912 
3913   void work(uint worker_id) {
3914     G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
3915 
3916     ResourceMark rm;
3917     HandleMark   hm;
3918 
3919     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
3920     pss->set_ref_processor(NULL);
3921     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3922 
3923     // Is alive closure
3924     G1AlwaysAliveClosure always_alive;
3925 
3926     // Copying keep alive closure. Applied to referent objects that need
3927     // to be copied.
3928     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
3929 
3930     ReferenceProcessor* rp = _g1h->ref_processor_cm();
3931 
3932     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
3933     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
3934 
3935     // limit is set using max_num_q() - which was set using ParallelGCThreads.
3936     // So this must be true - but assert just in case someone decides to
3937     // change the worker ids.
3938     assert(worker_id < limit, "sanity");
3939     assert(!rp->discovery_is_atomic(), "check this code");
3940 
3941     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
3942     for (uint idx = worker_id; idx < limit; idx += stride) {
3943       DiscoveredList& ref_list = rp->discovered_refs()[idx];
3944 
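
The claiming scheme in this loop is worth spelling out: with stride = MIN2(MAX2(n_workers, 1U), limit), worker i visits discovered lists i, i+stride, i+2*stride, ... below limit, so every list index in [0, limit) is claimed by exactly one worker, given the worker_id < limit sanity assert. A minimal standalone sketch of that partitioning (illustrative values, not HotSpot code):

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

int main() {
  const unsigned n_workers = 4;
  const unsigned limit = 12;  // number_of_subclasses_of_ref() * max_num_q() in the real code
  const unsigned stride = std::min(std::max(n_workers, 1u), limit);

  std::vector<unsigned> claims(limit, 0);
  for (unsigned worker_id = 0; worker_id < n_workers; worker_id++) {
    assert(worker_id < limit);  // mirrors the sanity assert above
    for (unsigned idx = worker_id; idx < limit; idx += stride) {
      claims[idx]++;            // this worker would process discovered list idx
    }
  }
  for (unsigned idx = 0; idx < limit; idx++) {
    assert(claims[idx] == 1);   // each list claimed by exactly one worker
  }
  printf("all %u lists claimed exactly once\n", limit);
  return 0;
}

If n_workers exceeded limit, the surplus workers would trip the same sanity assert the code keeps, which is why limit is derived from ParallelGCThreads via max_num_q().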

