
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 59232 : [mq]: scan_task

--- Old version (before [mq]: scan_task) ---

1524   _is_alive_closure_stw(this),
1525   _is_subject_to_discovery_stw(this),
1526   _ref_processor_cm(NULL),
1527   _is_alive_closure_cm(this),
1528   _is_subject_to_discovery_cm(this),
1529   _region_attr() {
1530 
1531   _verifier = new G1HeapVerifier(this);
1532 
1533   _allocator = new G1Allocator(this);
1534 
1535   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());
1536 
1537   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1538 
1539   // Override the default _filler_array_max_size so that no humongous filler
1540   // objects are created.
1541   _filler_array_max_size = _humongous_object_threshold_in_words;
1542 
1543   uint n_queues = ParallelGCThreads;
1544   _task_queues = new RefToScanQueueSet(n_queues);
1545 
1546   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1547 
1548   for (uint i = 0; i < n_queues; i++) {
1549     RefToScanQueue* q = new RefToScanQueue();
1550     q->initialize();
1551     _task_queues->register_queue(i, q);
1552     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1553   }
1554 
1555   // Initialize the G1EvacuationFailureALot counters and flags.
1556   NOT_PRODUCT(reset_evacuation_should_fail();)
1557   _gc_tracer_stw->initialize();
1558 
1559   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1560 }
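For orientation on the filler-array cap above: in current G1 code humongous_threshold_for() is half the given region size, so the numbers work out roughly as below (a sketch with assumed values, not code from this file):

    size_t grain_bytes = 2 * M;                       // assumed default 2 MB region size
    size_t grain_words = grain_bytes / HeapWordSize;  // HeapRegion::GrainWords
    size_t threshold   = grain_words / 2;             // humongous_threshold_for(grain_words)
    // Objects of at least `threshold` words are humongous; capping
    // _filler_array_max_size at the same value keeps filler arrays
    // from crossing into that range.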
1561 
1562 static size_t actual_reserved_page_size(ReservedSpace rs) {
1563   size_t page_size = os::vm_page_size();
1564   if (UseLargePages) {
1565     // There are two ways to manage large page memory.
1566     // 1. OS supports committing large page memory.
1567     // 2. OS doesn't support committing large page memory so ReservedSpace manages it.
1568     //    And ReservedSpace calls it 'special'. If we failed to set 'special',
1569     //    we reserved the memory without large pages.
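         //    (A sketch of where this presumably goes, assuming the usual
         //     ReservedSpace/os API rather than quoting the elided body:
         //         if (rs.special()) {
         //           page_size = os::large_page_size();
         //         }
         //     i.e. the large page size is only reported when the reservation
         //     actually obtained large pages.)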


3381 
3382   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3383   virtual void do_oop(      oop* p) { do_oop_work(p); }
3384 
3385   template <class T> void do_oop_work(T* p) {
3386     oop obj = RawAccess<>::oop_load(p);
3387 
3388     if (_g1h->is_in_cset_or_humongous(obj)) {
3389       // If the referent object has been forwarded (either copied
3390       // to a new location or to itself in the event of an
3391       // evacuation failure) then we need to update the reference
3392       // field and, if both reference and referent are in the G1
3393       // heap, update the RSet for the referent.
3394       //
3395       // If the referent has not been forwarded then we have to keep
3396       // it alive by policy. Therefore we have to copy the referent.
3397       //
3398       // When the queue is drained (after each phase of reference processing)
3399       // the object and its followers will be copied, the reference field set
3400       // to point to the new location, and the RSet updated.
3401       _par_scan_state->push_on_queue(p);
3402     }
3403   }
3404 };
3405 
3406 // Serial drain queue closure. Called as the 'complete_gc'
3407 // closure for each discovered list in some of the
3408 // reference processing phases.
3409 
3410 class G1STWDrainQueueClosure: public VoidClosure {
3411 protected:
3412   G1CollectedHeap* _g1h;
3413   G1ParScanThreadState* _par_scan_state;
3414 
3415   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
3416 
3417 public:
3418   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
3419     _g1h(g1h),
3420     _par_scan_state(pss)
3421   { }
3422 
3423   void do_void() {
3424     G1ParScanThreadState* const pss = par_scan_state();
3425     pss->trim_queue();
3426   }
3427 };
3428 
3429 // Parallel Reference Processing closures
3430 
3431 // Implementation of AbstractRefProcTaskExecutor for parallel reference
3432 // processing during G1 evacuation pauses.
3433 
3434 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
3435 private:
3436   G1CollectedHeap*          _g1h;
3437   G1ParScanThreadStateSet*  _pss;
3438   RefToScanQueueSet*        _queues;
3439   WorkGang*                 _workers;
3440 
3441 public:
3442   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
3443                            G1ParScanThreadStateSet* per_thread_states,
3444                            WorkGang* workers,
3445                            RefToScanQueueSet *task_queues) :
3446     _g1h(g1h),
3447     _pss(per_thread_states),
3448     _queues(task_queues),
3449     _workers(workers)
3450   {
3451     g1h->ref_processor_stw()->set_active_mt_degree(workers->active_workers());
3452   }
3453 
3454   // Executes the given task using the worker threads of the given WorkGang.
3455   virtual void execute(ProcessTask& task, uint ergo_workers);
3456 };
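The execute() declared here is defined later in the file, after the proxy class below. A minimal sketch of its usual shape (reconstructed under assumption, not the verbatim body):

    void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
      assert(_workers != NULL, "Need parallel worker threads.");
      // Wrap the ProcessTask in a gang task and run it on ergo_workers threads.
      TaskTerminator terminator(ergo_workers, _queues);
      G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
      _workers->run_task(&proc_task_proxy, ergo_workers);
    }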
3457 
3458 // Gang task for possibly parallel reference processing
3459 
3460 class G1STWRefProcTaskProxy: public AbstractGangTask {
3461   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
3462   ProcessTask&     _proc_task;
3463   G1CollectedHeap* _g1h;
3464   G1ParScanThreadStateSet* _pss;
3465   RefToScanQueueSet* _task_queues;
3466   TaskTerminator* _terminator;
3467 
3468 public:
3469   G1STWRefProcTaskProxy(ProcessTask& proc_task,
3470                         G1CollectedHeap* g1h,
3471                         G1ParScanThreadStateSet* per_thread_states,
3472                         RefToScanQueueSet *task_queues,
3473                         TaskTerminator* terminator) :
3474     AbstractGangTask("Process reference objects in parallel"),
3475     _proc_task(proc_task),
3476     _g1h(g1h),
3477     _pss(per_thread_states),
3478     _task_queues(task_queues),
3479     _terminator(terminator)
3480   {}
3481 
3482   virtual void work(uint worker_id) {
3483     // The reference processing task executed by a single worker.
3484     ResourceMark rm;
3485     HandleMark   hm;
3486 
3487     G1STWIsAliveClosure is_alive(_g1h);
3488 
3489     G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3490     pss->set_ref_discoverer(NULL);
3491 
3492     // Keep alive closure.


3783   // InitialMark needs claim bits to keep track of the marked-through CLDs.
3784   if (collector_state()->in_initial_mark_gc()) {
3785     concurrent_mark()->pre_initial_mark();
3786 
3787     double start_clear_claimed_marks = os::elapsedTime();
3788 
3789     ClassLoaderDataGraph::clear_claimed_marks();
3790 
3791     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3792     phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3793   }
3794 
3795   // Should G1EvacuationFailureALot be in effect for this GC?
3796   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
3797 }
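For reference, NOT_PRODUCT() comes from utilities/macros.hpp and compiles its argument out of product builds, roughly:

    #ifdef PRODUCT
    #define NOT_PRODUCT(code)
    #else
    #define NOT_PRODUCT(code) code
    #endif

so the G1EvacuationFailureALot hooks above exist only in non-product builds.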
3798 
3799 class G1EvacuateRegionsBaseTask : public AbstractGangTask {
3800 protected:
3801   G1CollectedHeap* _g1h;
3802   G1ParScanThreadStateSet* _per_thread_states;
3803   RefToScanQueueSet* _task_queues;
3804   TaskTerminator _terminator;
3805   uint _num_workers;
3806 
3807   void evacuate_live_objects(G1ParScanThreadState* pss,
3808                              uint worker_id,
3809                              G1GCPhaseTimes::GCParPhases objcopy_phase,
3810                              G1GCPhaseTimes::GCParPhases termination_phase) {
3811     G1GCPhaseTimes* p = _g1h->phase_times();
3812 
3813     Ticks start = Ticks::now();
3814     G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
3815     cl.do_void();
3816 
3817     assert(pss->queue_is_empty(), "should be empty");
3818 
3819     Tickspan evac_time = (Ticks::now() - start);
3820     p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
3821 
3822     if (termination_phase == G1GCPhaseTimes::Termination) {
3823       p->record_time_secs(termination_phase, worker_id, cl.term_time());
3824       p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
3825     } else {
3826       p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
3827       p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
3828     }
3829     assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation");
3830   }
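       // Aside on the branch above: the Termination phase of the initial
       // evacuation runs once per worker per pause, so plain record_* calls
       // suffice; the Opt* phases can run several times in one pause (one
       // round per batch of optional regions), which is presumably why they
       // use the accumulating record_or_add_* variants.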
3831 
3832   virtual void start_work(uint worker_id) { }
3833 
3834   virtual void end_work(uint worker_id) { }
3835 
3836   virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
3837 
3838   virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
3839 
3840 public:
3841   G1EvacuateRegionsBaseTask(const char* name, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet* task_queues, uint num_workers) :
3842     AbstractGangTask(name),
3843     _g1h(G1CollectedHeap::heap()),
3844     _per_thread_states(per_thread_states),
3845     _task_queues(task_queues),
3846     _terminator(num_workers, _task_queues),
3847     _num_workers(num_workers)
3848   { }
3849 
3850   void work(uint worker_id) {
3851     start_work(worker_id);
3852 
3853     {
3854       ResourceMark rm;
3855       HandleMark   hm;
3856 
3857       G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
3858       pss->set_ref_discoverer(_g1h->ref_processor_stw());
3859 
3860       scan_roots(pss, worker_id);
3861       evacuate_live_objects(pss, worker_id);


3872     _root_processor->evacuate_roots(pss, worker_id);
3873     _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy);
3874     _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
3875   }
3876 
3877   void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3878     G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
3879   }
3880 
3881   void start_work(uint worker_id) {
3882     _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
3883   }
3884 
3885   void end_work(uint worker_id) {
3886     _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
3887   }
3888 
3889 public:
3890   G1EvacuateRegionsTask(G1CollectedHeap* g1h,
3891                         G1ParScanThreadStateSet* per_thread_states,
3892                         RefToScanQueueSet* task_queues,
3893                         G1RootProcessor* root_processor,
3894                         uint num_workers) :
3895     G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
3896     _root_processor(root_processor)
3897   { }
3898 };
3899 
3900 void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3901   G1GCPhaseTimes* p = phase_times();
3902 
3903   {
3904     Ticks start = Ticks::now();
3905     rem_set()->merge_heap_roots(true /* initial_evacuation */);
3906     p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
3907   }
3908 
3909   Tickspan task_time;
3910   const uint num_workers = workers()->active_workers();
3911 
3912   Ticks start_processing = Ticks::now();


3920   }
3921   Tickspan total_processing = Ticks::now() - start_processing;
3922 
3923   p->record_initial_evac_time(task_time.seconds() * 1000.0);
3924   p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3925 }
3926 
3927 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
3928 
3929   void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3930     _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
3931     _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
3932   }
3933 
3934   void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3935     G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
3936   }
3937 
3938 public:
3939   G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
3940                                 RefToScanQueueSet* queues,
3941                                 uint num_workers) :
3942     G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
3943   }
3944 };
3945 
3946 void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
3947   class G1MarkScope : public MarkScope { };
3948 
3949   Tickspan task_time;
3950 
3951   Ticks start_processing = Ticks::now();
3952   {
3953     G1MarkScope code_mark_scope;
3954     G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
3955     task_time = run_task(&task);
3956     // See the comment in evacuate_collection_set() for why this scope is needed.
3957   }
3958   Tickspan total_processing = Ticks::now() - start_processing;
3959 
3960   G1GCPhaseTimes* p = phase_times();




1524   _is_alive_closure_stw(this),
1525   _is_subject_to_discovery_stw(this),
1526   _ref_processor_cm(NULL),
1527   _is_alive_closure_cm(this),
1528   _is_subject_to_discovery_cm(this),
1529   _region_attr() {
1530 
1531   _verifier = new G1HeapVerifier(this);
1532 
1533   _allocator = new G1Allocator(this);
1534 
1535   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());
1536 
1537   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1538 
1539   // Override the default _filler_array_max_size so that no humongous filler
1540   // objects are created.
1541   _filler_array_max_size = _humongous_object_threshold_in_words;
1542 
1543   uint n_queues = ParallelGCThreads;
1544   _task_queues = new ScannerTasksQueueSet(n_queues);
1545 
1546   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1547 
1548   for (uint i = 0; i < n_queues; i++) {
1549     ScannerTasksQueue* q = new ScannerTasksQueue();
1550     q->initialize();
1551     _task_queues->register_queue(i, q);
1552     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1553   }
1554 
1555   // Initialize the G1EvacuationFailureALot counters and flags.
1556   NOT_PRODUCT(reset_evacuation_should_fail();)
1557   _gc_tracer_stw->initialize();
1558 
1559   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1560 }
1561 
1562 static size_t actual_reserved_page_size(ReservedSpace rs) {
1563   size_t page_size = os::vm_page_size();
1564   if (UseLargePages) {
1565     // There are two ways to manage large page memory.
1566     // 1. OS supports committing large page memory.
1567     // 2. OS doesn't support committing large page memory so ReservedSpace manages it.
1568     //    And ReservedSpace calls it 'special'. If we failed to set 'special',
1569     //    we reserved the memory without large pages.


3381 
3382   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3383   virtual void do_oop(      oop* p) { do_oop_work(p); }
3384 
3385   template <class T> void do_oop_work(T* p) {
3386     oop obj = RawAccess<>::oop_load(p);
3387 
3388     if (_g1h->is_in_cset_or_humongous(obj)) {
3389       // If the referent object has been forwarded (either copied
3390       // to a new location or to itself in the event of an
3391       // evacuation failure) then we need to update the reference
3392       // field and, if both reference and referent are in the G1
3393       // heap, update the RSet for the referent.
3394       //
3395       // If the referent has not been forwarded then we have to keep
3396       // it alive by policy. Therefore we have to copy the referent.
3397       //
3398       // When the queue is drained (after each phase of reference processing)
3399       // the object and its followers will be copied, the reference field set
3400       // to point to the new location, and the RSet updated.
3401       _par_scan_state->push_on_queue(ScannerTask(p));
3402     }
3403   }
3404 };
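This push is the heart of the change: the per-thread task queues now carry ScannerTask values rather than bare oop* / narrowOop* pointers, so a single queue can hold both pointer widths (and partial-array tasks). The real type lives in gc/shared/taskqueue.hpp; a minimal sketch of the tagged-pointer idea behind it (details assumed):

    class ScannerTask {
      void* _p;                                  // tagged pointer
      static const uintptr_t NarrowOopTag = 1;   // low bit marks a narrowOop*
    public:
      explicit ScannerTask(oop* p)       : _p(p) { }
      explicit ScannerTask(narrowOop* p)
        : _p((void*)((uintptr_t)p | NarrowOopTag)) { }
      bool is_narrow_oop_ptr() const {
        return ((uintptr_t)_p & NarrowOopTag) != 0;
      }
      oop* to_oop_ptr() const {               // valid when !is_narrow_oop_ptr()
        return (oop*)_p;
      }
      narrowOop* to_narrow_oop_ptr() const {  // strips the tag bit
        return (narrowOop*)((uintptr_t)_p & ~NarrowOopTag);
      }
    };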
3405 
3406 // Serial drain queue closure. Called as the 'complete_gc'
3407 // closure for each discovered list in some of the
3408 // reference processing phases.
3409 
3410 class G1STWDrainQueueClosure: public VoidClosure {
3411 protected:
3412   G1CollectedHeap* _g1h;
3413   G1ParScanThreadState* _par_scan_state;
3414 
3415   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
3416 
3417 public:
3418   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
3419     _g1h(g1h),
3420     _par_scan_state(pss)
3421   { }
3422 
3423   void do_void() {
3424     G1ParScanThreadState* const pss = par_scan_state();
3425     pss->trim_queue();
3426   }
3427 };
3428 
3429 // Parallel Reference Processing closures
3430 
3431 // Implementation of AbstractRefProcTaskExecutor for parallel reference
3432 // processing during G1 evacuation pauses.
3433 
3434 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
3435 private:
3436   G1CollectedHeap*          _g1h;
3437   G1ParScanThreadStateSet*  _pss;
3438   ScannerTasksQueueSet*     _queues;
3439   WorkGang*                 _workers;
3440 
3441 public:
3442   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
3443                            G1ParScanThreadStateSet* per_thread_states,
3444                            WorkGang* workers,
3445                            ScannerTasksQueueSet *task_queues) :
3446     _g1h(g1h),
3447     _pss(per_thread_states),
3448     _queues(task_queues),
3449     _workers(workers)
3450   {
3451     g1h->ref_processor_stw()->set_active_mt_degree(workers->active_workers());
3452   }
3453 
3454   // Executes the given task using the worker threads of the given WorkGang.
3455   virtual void execute(ProcessTask& task, uint ergo_workers);
3456 };
3457 
3458 // Gang task for possibly parallel reference processing
3459 
3460 class G1STWRefProcTaskProxy: public AbstractGangTask {
3461   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
3462   ProcessTask&     _proc_task;
3463   G1CollectedHeap* _g1h;
3464   G1ParScanThreadStateSet* _pss;
3465   ScannerTasksQueueSet* _task_queues;
3466   TaskTerminator* _terminator;
3467 
3468 public:
3469   G1STWRefProcTaskProxy(ProcessTask& proc_task,
3470                         G1CollectedHeap* g1h,
3471                         G1ParScanThreadStateSet* per_thread_states,
3472                         ScannerTasksQueueSet *task_queues,
3473                         TaskTerminator* terminator) :
3474     AbstractGangTask("Process reference objects in parallel"),
3475     _proc_task(proc_task),
3476     _g1h(g1h),
3477     _pss(per_thread_states),
3478     _task_queues(task_queues),
3479     _terminator(terminator)
3480   {}
3481 
3482   virtual void work(uint worker_id) {
3483     // The reference processing task executed by a single worker.
3484     ResourceMark rm;
3485     HandleMark   hm;
3486 
3487     G1STWIsAliveClosure is_alive(_g1h);
3488 
3489     G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3490     pss->set_ref_discoverer(NULL);
3491 
3492     // Keep alive closure.


3783   // InitialMark needs claim bits to keep track of the marked-through CLDs.
3784   if (collector_state()->in_initial_mark_gc()) {
3785     concurrent_mark()->pre_initial_mark();
3786 
3787     double start_clear_claimed_marks = os::elapsedTime();
3788 
3789     ClassLoaderDataGraph::clear_claimed_marks();
3790 
3791     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3792     phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3793   }
3794 
3795   // Should G1EvacuationFailureALot be in effect for this GC?
3796   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
3797 }
3798 
3799 class G1EvacuateRegionsBaseTask : public AbstractGangTask {
3800 protected:
3801   G1CollectedHeap* _g1h;
3802   G1ParScanThreadStateSet* _per_thread_states;
3803   ScannerTasksQueueSet* _task_queues;
3804   TaskTerminator _terminator;
3805   uint _num_workers;
3806 
3807   void evacuate_live_objects(G1ParScanThreadState* pss,
3808                              uint worker_id,
3809                              G1GCPhaseTimes::GCParPhases objcopy_phase,
3810                              G1GCPhaseTimes::GCParPhases termination_phase) {
3811     G1GCPhaseTimes* p = _g1h->phase_times();
3812 
3813     Ticks start = Ticks::now();
3814     G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
3815     cl.do_void();
3816 
3817     assert(pss->queue_is_empty(), "should be empty");
3818 
3819     Tickspan evac_time = (Ticks::now() - start);
3820     p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
3821 
3822     if (termination_phase == G1GCPhaseTimes::Termination) {
3823       p->record_time_secs(termination_phase, worker_id, cl.term_time());
3824       p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
3825     } else {
3826       p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
3827       p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
3828     }
3829     assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation");
3830   }
3831 
3832   virtual void start_work(uint worker_id) { }
3833 
3834   virtual void end_work(uint worker_id) { }
3835 
3836   virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
3837 
3838   virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
3839 
3840 public:
3841   G1EvacuateRegionsBaseTask(const char* name,
3842                             G1ParScanThreadStateSet* per_thread_states,
3843                             ScannerTasksQueueSet* task_queues,
3844                             uint num_workers) :
3845     AbstractGangTask(name),
3846     _g1h(G1CollectedHeap::heap()),
3847     _per_thread_states(per_thread_states),
3848     _task_queues(task_queues),
3849     _terminator(num_workers, _task_queues),
3850     _num_workers(num_workers)
3851   { }
3852 
3853   void work(uint worker_id) {
3854     start_work(worker_id);
3855 
3856     {
3857       ResourceMark rm;
3858       HandleMark   hm;
3859 
3860       G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
3861       pss->set_ref_discoverer(_g1h->ref_processor_stw());
3862 
3863       scan_roots(pss, worker_id);
3864       evacuate_live_objects(pss, worker_id);


3875     _root_processor->evacuate_roots(pss, worker_id);
3876     _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy);
3877     _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
3878   }
3879 
3880   void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3881     G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
3882   }
3883 
3884   void start_work(uint worker_id) {
3885     _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
3886   }
3887 
3888   void end_work(uint worker_id) {
3889     _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
3890   }
3891 
3892 public:
3893   G1EvacuateRegionsTask(G1CollectedHeap* g1h,
3894                         G1ParScanThreadStateSet* per_thread_states,
3895                         ScannerTasksQueueSet* task_queues,
3896                         G1RootProcessor* root_processor,
3897                         uint num_workers) :
3898     G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
3899     _root_processor(root_processor)
3900   { }
3901 };
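For orientation, a sketch of how this task is typically constructed and driven from evacuate_initial_collection_set() below (the exact call-site body falls outside this hunk; shape assumed):

    G1RootProcessor root_processor(this, num_workers);
    G1EvacuateRegionsTask task(this, per_thread_states, _task_queues,
                               &root_processor, num_workers);
    task_time = run_task(&task);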
3902 
3903 void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3904   G1GCPhaseTimes* p = phase_times();
3905 
3906   {
3907     Ticks start = Ticks::now();
3908     rem_set()->merge_heap_roots(true /* initial_evacuation */);
3909     p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
3910   }
3911 
3912   Tickspan task_time;
3913   const uint num_workers = workers()->active_workers();
3914 
3915   Ticks start_processing = Ticks::now();


3923   }
3924   Tickspan total_processing = Ticks::now() - start_processing;
3925 
3926   p->record_initial_evac_time(task_time.seconds() * 1000.0);
3927   p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3928 }
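The arithmetic above splits the scope's wall time in two: the gang task itself is reported as initial evacuation time, and whatever the scope spent beyond that (presumably the code-root fixup performed as the root processor is torn down) is attributed to code root fixup. With invented numbers:

    total_processing     = 52.4 ms, task_time = 50.1 ms
    initial_evac_time    = 50.1 ms
    code_root_fixup_time = 52.4 - 50.1 = 2.3 ms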
3929 
3930 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
3931 
3932   void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3933     _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
3934     _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
3935   }
3936 
3937   void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3938     G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
3939   }
3940 
3941 public:
3942   G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
3943                                 ScannerTasksQueueSet* queues,
3944                                 uint num_workers) :
3945     G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
3946   }
3947 };
3948 
3949 void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
3950   class G1MarkScope : public MarkScope { };
3951 
3952   Tickspan task_time;
3953 
3954   Ticks start_processing = Ticks::now();
3955   {
3956     G1MarkScope code_mark_scope;
3957     G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
3958     task_time = run_task(&task);
3959     // See the comment in evacuate_collection_set() for why this scope is needed.
3960   }
3961   Tickspan total_processing = Ticks::now() - start_processing;
3962 
3963   G1GCPhaseTimes* p = phase_times();

