
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 52675 : 8213890: Implementation of JEP 344: Abortable Mixed Collections for G1
Reviewed-by:
Contributed-by: erik.helin@oracle.com, stefan.johansson@oracle.com
rev 52676 : imported patch AMGC-impl
rev 52677 : imported patch AMGC-tsch-rev1
rev 52678 : imported patch AMGC-tsch-rev1-optcset
rev 52679 : imported patch AMGC-tsch-rev1-log
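
For orientation: the core of this change is an optional-evacuation loop bounded by
the remaining pause-time budget. A condensed sketch of the control flow the patch
adds (paraphrased from evacuate_optional_collection_set in the new version below;
not the verbatim code):

    do {
      double time_left_ms = MaxGCPauseMillis - time_used_ms;
      if (time_left_ms < 0) {
        break;                 // pause budget already exhausted
      }
      // Select only the optional regions predicted to fit in a slice of the budget.
      optional_cset.prepare_evacuation(time_left_ms * optional_evacuation_fraction);
      if (optional_cset.prepare_failed()) {
        break;                 // nothing fits; give up on the remaining regions
      }
      evacuate_optional_regions(per_thread_states, &optional_cset);
      optional_cset.complete_evacuation();
      if (optional_cset.evacuation_failed()) {
        break;
      }
    } while (!optional_cset.is_empty());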


2980         // If the remembered sets are not up to date we might miss some
2981         // entries that need to be handled.
2982         g1_rem_set()->cleanupHRRS();
2983 
2984         register_humongous_regions_with_cset();
2985 
2986         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
2987 
2988         // We call this after finalize_cset() to
2989         // ensure that the CSet has been finalized.
2990         _cm->verify_no_cset_oops();
2991 
2992         if (_hr_printer.is_active()) {
2993           G1PrintCollectionSetClosure cl(&_hr_printer);
2994           _collection_set.iterate(&cl);
2995         }
2996 
2997         // Initialize the GC alloc regions.
2998         _allocator->init_gc_alloc_regions(evacuation_info);
2999 
3000         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3001         pre_evacuate_collection_set();
3002 
3003         // Actually do the work...
3004         evacuate_collection_set(&per_thread_states);
3005 
3006         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3007 
3008         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3009         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3010 
3011         eagerly_reclaim_humongous_regions();
3012 
3013         record_obj_copy_mem_stats();
3014         _survivor_evac_stats.adjust_desired_plab_sz();
3015         _old_evac_stats.adjust_desired_plab_sz();
3016 
3017         double start = os::elapsedTime();
3018         start_new_collection_set();
3019         g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
3020 
3021         if (evacuation_failed()) {
3022           double recalculate_used_start = os::elapsedTime();
3023           set_used(recalculate_used());
3024           g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);


3175   }
3176 
3177   _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3178   _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3179 }
3180 
3181 bool G1ParEvacuateFollowersClosure::offer_termination() {
3182   EventGCPhaseParallel event;
3183   G1ParScanThreadState* const pss = par_scan_state();
3184   start_term_time();
3185   const bool res = terminator()->offer_termination();
3186   end_term_time();
3187   event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
3188   return res;
3189 }
3190 
3191 void G1ParEvacuateFollowersClosure::do_void() {
3192   EventGCPhaseParallel event;
3193   G1ParScanThreadState* const pss = par_scan_state();
3194   pss->trim_queue();
3195   event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
3196   do {
3197     EventGCPhaseParallel event;
3198     pss->steal_and_trim_queue(queues());
3199     event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
3200   } while (!offer_termination());
3201 }
3202 
3203 class G1ParTask : public AbstractGangTask {
3204 protected:
3205   G1CollectedHeap*         _g1h;
3206   G1ParScanThreadStateSet* _pss;
3207   RefToScanQueueSet*       _queues;
3208   G1RootProcessor*         _root_processor;
3209   ParallelTaskTerminator   _terminator;
3210   uint                     _n_workers;
3211 
3212 public:
3213   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3214     : AbstractGangTask("G1 collection"),
3215       _g1h(g1h),
3216       _pss(per_thread_states),
3217       _queues(task_queues),
3218       _root_processor(root_processor),
3219       _terminator(n_workers, _queues),


3234 
3235       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
3236       pss->set_ref_discoverer(rp);
3237 
3238       double start_strong_roots_sec = os::elapsedTime();
3239 
3240       _root_processor->evacuate_roots(pss, worker_id);
3241 
3242       // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
3243       // having the visited nmethods act as roots for concurrent marking.
3244       // We only want to make sure that the oops in the nmethods are adjusted with regard to the
3245       // objects copied by the current evacuation.
3246       _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
3247 
3248       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3249 
3250       double term_sec = 0.0;
3251       size_t evac_term_attempts = 0;
3252       {
3253         double start = os::elapsedTime();
3254         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
3255         evac.do_void();
3256 
3257         evac_term_attempts = evac.term_attempts();
3258         term_sec = evac.term_time();
3259         double elapsed_sec = os::elapsedTime() - start;
3260 
3261         G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3262         p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3263         p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3264         p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3265       }
3266 
3267       assert(pss->queue_is_empty(), "should be empty");
3268 
3269       if (log_is_enabled(Debug, gc, task, stats)) {
3270         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3271         size_t lab_waste;
3272         size_t lab_undo_waste;
3273         pss->waste(lab_waste, lab_undo_waste);
3274         _g1h->print_termination_stats(worker_id,


3525     _g1h(g1h),
3526     _pss(per_thread_states),
3527     _task_queues(task_queues),
3528     _terminator(terminator)
3529   {}
3530 
3531   virtual void work(uint worker_id) {
3532     // The reference processing task executed by a single worker.
3533     ResourceMark rm;
3534     HandleMark   hm;
3535 
3536     G1STWIsAliveClosure is_alive(_g1h);
3537 
3538     G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3539     pss->set_ref_discoverer(NULL);
3540 
3541     // Keep alive closure.
3542     G1CopyingKeepAliveClosure keep_alive(_g1h, pss);
3543 
3544     // Complete GC closure.
3545     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
3546 
3547     // Call the reference processing task's work routine.
3548     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
3549 
3550     // Note we cannot assert that the refs array is empty here as not all
3551     // of the processing tasks (specifically phase2 - pp2_work) execute
3552     // the complete_gc closure (which ordinarily would drain the queue), so
3553     // the queue may not be empty.
3554   }
3555 };
3556 
3557 // Driver routine for parallel reference processing.
3558 // Creates an instance of the ref processing gang
3559 // task and has the worker threads execute it.
3560 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
3561   assert(_workers != NULL, "Need parallel worker threads.");
3562 
3563   assert(_workers->active_workers() >= ergo_workers,
3564          "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
3565          ergo_workers, _workers->active_workers());


3695     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
3696 
3697     print_termination_stats_hdr();
3698 
3699     workers()->run_task(&g1_par_task);
3700     end_par_time_sec = os::elapsedTime();
3701 
3702     // Closing the inner scope will execute the destructor
3703     // for the G1RootProcessor object. We record the current
3704     // elapsed time before closing the scope so that time
3705     // taken for the destructor is NOT included in the
3706     // reported parallel time.
3707   }
3708 
3709   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
3710   phase_times->record_par_time(par_time_ms);
3711 
3712   double code_root_fixup_time_ms =
3713         (os::elapsedTime() - end_par_time_sec) * 1000.0;
3714   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
3715 }
3716 
3717 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3718   // Also cleans the card table of temporary duplicate detection information used
3719   // during UpdateRS/ScanRS.
3720   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
3721 
3722   // Process any discovered reference objects - we have
3723   // to do this _before_ we retire the GC alloc regions
3724   // as we may have to copy some 'reachable' referent
3725   // objects (and their reachable sub-graphs) that were
3726   // not copied during the pause.
3727   process_discovered_references(per_thread_states);
3728 
3729   // FIXME
3730   // CM's reference processing also cleans up the string table.
3731   // Should we do that here also? We could, but it is a serial operation
3732   // and could significantly increase the pause time.
3733 
3734   G1STWIsAliveClosure is_alive(this);




2980         // If the remembered sets are not up to date we might miss some
2981         // entries that need to be handled.
2982         g1_rem_set()->cleanupHRRS();
2983 
2984         register_humongous_regions_with_cset();
2985 
2986         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
2987 
2988         // We call this after finalize_cset() to
2989         // ensure that the CSet has been finalized.
2990         _cm->verify_no_cset_oops();
2991 
2992         if (_hr_printer.is_active()) {
2993           G1PrintCollectionSetClosure cl(&_hr_printer);
2994           _collection_set.iterate(&cl);
2995         }
2996 
2997         // Initialize the GC alloc regions.
2998         _allocator->init_gc_alloc_regions(evacuation_info);
2999 
3000         G1ParScanThreadStateSet per_thread_states(this,
3001                                                   workers()->active_workers(),
3002                                                   collection_set()->young_region_length(),
3003                                                   collection_set()->optional_region_length());
3004         pre_evacuate_collection_set();
3005 
3006         // Actually do the work...
3007         evacuate_collection_set(&per_thread_states);
3008         evacuate_optional_collection_set(&per_thread_states);
3009 
3010         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3011 
3012         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3013         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3014 
3015         eagerly_reclaim_humongous_regions();
3016 
3017         record_obj_copy_mem_stats();
3018         _survivor_evac_stats.adjust_desired_plab_sz();
3019         _old_evac_stats.adjust_desired_plab_sz();
3020 
3021         double start = os::elapsedTime();
3022         start_new_collection_set();
3023         g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
3024 
3025         if (evacuation_failed()) {
3026           double recalculate_used_start = os::elapsedTime();
3027           set_used(recalculate_used());
3028           g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);


3179   }
3180 
3181   _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3182   _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3183 }
3184 
3185 bool G1ParEvacuateFollowersClosure::offer_termination() {
3186   EventGCPhaseParallel event;
3187   G1ParScanThreadState* const pss = par_scan_state();
3188   start_term_time();
3189   const bool res = terminator()->offer_termination();
3190   end_term_time();
3191   event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
3192   return res;
3193 }
3194 
3195 void G1ParEvacuateFollowersClosure::do_void() {
3196   EventGCPhaseParallel event;
3197   G1ParScanThreadState* const pss = par_scan_state();
3198   pss->trim_queue();
3199   event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
3200   do {
3201     EventGCPhaseParallel event;
3202     pss->steal_and_trim_queue(queues());
3203     event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
3204   } while (!offer_termination());
3205 }
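
The followers closure now takes the G1GCPhaseTimes phase to report under, so the same
drain-and-steal loop is attributed to ObjCopy during the mandatory evacuation and to
OptObjCopy during optional rounds. The optional-region task further down constructs it
as, for example:

    G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);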
3206 
3207 class G1ParTask : public AbstractGangTask {
3208 protected:
3209   G1CollectedHeap*         _g1h;
3210   G1ParScanThreadStateSet* _pss;
3211   RefToScanQueueSet*       _queues;
3212   G1RootProcessor*         _root_processor;
3213   ParallelTaskTerminator   _terminator;
3214   uint                     _n_workers;
3215 
3216 public:
3217   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3218     : AbstractGangTask("G1 collection"),
3219       _g1h(g1h),
3220       _pss(per_thread_states),
3221       _queues(task_queues),
3222       _root_processor(root_processor),
3223       _terminator(n_workers, _queues),


3238 
3239       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
3240       pss->set_ref_discoverer(rp);
3241 
3242       double start_strong_roots_sec = os::elapsedTime();
3243 
3244       _root_processor->evacuate_roots(pss, worker_id);
3245 
3246       // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
3247       // having the visited nmethods act as roots for concurrent marking.
3248       // We only want to make sure that the oops in the nmethods are adjusted with regard to the
3249       // objects copied by the current evacuation.
3250       _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
3251 
3252       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3253 
3254       double term_sec = 0.0;
3255       size_t evac_term_attempts = 0;
3256       {
3257         double start = os::elapsedTime();
3258         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::ObjCopy);
3259         evac.do_void();
3260 
3261         evac_term_attempts = evac.term_attempts();
3262         term_sec = evac.term_time();
3263         double elapsed_sec = os::elapsedTime() - start;
3264 
3265         G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3266         p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3267         p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3268         p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3269       }
3270 
3271       assert(pss->queue_is_empty(), "should be empty");
3272 
3273       if (log_is_enabled(Debug, gc, task, stats)) {
3274         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3275         size_t lab_waste;
3276         size_t lab_undo_waste;
3277         pss->waste(lab_waste, lab_undo_waste);
3278         _g1h->print_termination_stats(worker_id,


3529     _g1h(g1h),
3530     _pss(per_thread_states),
3531     _task_queues(task_queues),
3532     _terminator(terminator)
3533   {}
3534 
3535   virtual void work(uint worker_id) {
3536     // The reference processing task executed by a single worker.
3537     ResourceMark rm;
3538     HandleMark   hm;
3539 
3540     G1STWIsAliveClosure is_alive(_g1h);
3541 
3542     G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3543     pss->set_ref_discoverer(NULL);
3544 
3545     // Keep alive closure.
3546     G1CopyingKeepAliveClosure keep_alive(_g1h, pss);
3547 
3548     // Complete GC closure.
3549     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator, G1GCPhaseTimes::ObjCopy);
3550 
3551     // Call the reference processing task's work routine.
3552     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
3553 
3554     // Note we cannot assert that the refs array is empty here as not all
3555     // of the processing tasks (specifically phase2 - pp2_work) execute
3556     // the complete_gc closure (which ordinarily would drain the queue), so
3557     // the queue may not be empty.
3558   }
3559 };
3560 
3561 // Driver routine for parallel reference processing.
3562 // Creates an instance of the ref processing gang
3563 // task and has the worker threads execute it.
3564 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
3565   assert(_workers != NULL, "Need parallel worker threads.");
3566 
3567   assert(_workers->active_workers() >= ergo_workers,
3568          "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
3569          ergo_workers, _workers->active_workers());


3699     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
3700 
3701     print_termination_stats_hdr();
3702 
3703     workers()->run_task(&g1_par_task);
3704     end_par_time_sec = os::elapsedTime();
3705 
3706     // Closing the inner scope will execute the destructor
3707     // for the G1RootProcessor object. We record the current
3708     // elapsed time before closing the scope so that time
3709     // taken for the destructor is NOT included in the
3710     // reported parallel time.
3711   }
3712 
3713   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
3714   phase_times->record_par_time(par_time_ms);
3715 
3716   double code_root_fixup_time_ms =
3717         (os::elapsedTime() - end_par_time_sec) * 1000.0;
3718   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
3719 }
3720 
3721 class G1EvacuateOptionalRegionTask : public AbstractGangTask {
3722   G1CollectedHeap* _g1h;
3723   G1ParScanThreadStateSet* _per_thread_states;
3724   G1OptionalCSet* _optional;
3725   RefToScanQueueSet* _queues;
3726   ParallelTaskTerminator _terminator;
3727 
3728   Tickspan trim_ticks(G1ParScanThreadState* pss) {
3729     Tickspan copy_time = pss->trim_ticks();
3730     pss->reset_trim_ticks();
3731     return copy_time;
3732   }
3733 
3734   void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3735     G1EvacuationRootClosures* root_cls = pss->closures();
3736     G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);
3737 
3738     size_t scanned = 0;
3739     size_t claimed = 0;
3740     size_t skipped = 0;
3741 
3742     Ticks    start = Ticks::now();
3743     Tickspan copy_time;
3744 
3745     for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
3746       HeapRegion* hr = _optional->region_at(i);
3747       G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
3748       pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
3749       copy_time += trim_ticks(pss);
3750 
3751       G1ScanRSForRegionClosure scan_rs_cl(_g1h->g1_rem_set()->scan_state(), &obj_cl, pss, worker_id);
3752       scan_rs_cl.do_heap_region(hr);
3753       copy_time += trim_ticks(pss);
3754       scanned += scan_rs_cl.cards_scanned();
3755       claimed += scan_rs_cl.cards_claimed();
3756       skipped += scan_rs_cl.cards_skipped();
3757     }
3758 
3759     Tickspan scan_time = (Ticks::now() - start) - copy_time;
3760     G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3761     p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
3762     p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());
3763 
3764     p->record_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
3765     p->record_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
3766     p->record_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
3767   }
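
To make the timing split in scan_roots concrete: trim_ticks() returns and resets the
time this worker spent copying (trimming its queue) during the region loop, so that
time is attributed to OptObjCopy and only the remainder to OptScanRS. With
illustrative numbers (assumed, not from a real run):

    // Suppose the per-region loop ran for 6 ms in total, of which 4 ms were
    // spent trimming inside oops_do() / do_heap_region():
    //   copy_time = 4 ms                                   -> OptObjCopy
    //   scan_time = (Ticks::now() - start) - copy_time
    //             = 6 ms - 4 ms = 2 ms                     -> OptScanRS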
3768 
3769   void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3770     Ticks start = Ticks::now();
3771     G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
3772     cl.do_void();
3773 
3774     Tickspan evac_time = (Ticks::now() - start);
3775     G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3776     p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
3777     assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
3778   }
3779 
3780  public:
3781   G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
3782                                G1ParScanThreadStateSet* per_thread_states,
3783                                G1OptionalCSet* cset,
3784                                RefToScanQueueSet* queues,
3785                                uint n_workers) :
3786     AbstractGangTask("G1 Evacuation Optional Region Task"),
3787     _g1h(g1h),
3788     _per_thread_states(per_thread_states),
3789     _optional(cset),
3790     _queues(queues),
3791     _terminator(n_workers, _queues) {
3792   }
3793 
3794   void work(uint worker_id) {
3795     ResourceMark rm;
3796     HandleMark  hm;
3797 
3798     G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
3799     pss->set_ref_discoverer(_g1h->ref_processor_stw());
3800 
3801     scan_roots(pss, worker_id);
3802     evacuate_live_objects(pss, worker_id);
3803   }
3804 };
3805 
3806 void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
3807   class G1MarkScope : public MarkScope {};
3808   G1MarkScope code_mark_scope;
3809 
3810   G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
3811   workers()->run_task(&task);
3812 }
3813 
3814 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3815   G1OptionalCSet optional_cset(&_collection_set);
3816   if (optional_cset.is_empty()) {
3817     return;
3818   }
3819 
3820   if (evacuation_failed()) {
3821     return;
3822   }
3823 
3824   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
3825   const double gc_start_time_ms = phase_times->cur_collection_start_sec() * 1000.0;
3826 
3827   double start_time_sec = os::elapsedTime();
3828 
3829   do {
3830     double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3831     double time_left_ms = MaxGCPauseMillis - time_used_ms;
3832 
3833     if (time_left_ms < 0) {
3834       log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
3835       break;
3836     }
3837 
3838     optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
3839     if (optional_cset.prepare_failed()) {
3840       log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
3841       break;
3842     }
3843 
3844     evacuate_optional_regions(per_thread_states, &optional_cset);
3845 
3846     optional_cset.complete_evacuation();
3847     if (optional_cset.evacuation_failed()) {
3848       break;
3849     }
3850   } while (!optional_cset.is_empty());
3851 
3852   phase_times->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
3853 }
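
A worked example of the budget check above, with assumed values (MaxGCPauseMillis and
the default optional_evacuation_fraction are not part of this hunk):

    // Assume MaxGCPauseMillis = 200 and 150 ms used when the loop is entered:
    //   time_left_ms = 200 - 150 = 50
    //   prepare_evacuation(50 * optional_evacuation_fraction)
    //     -> selects only optional regions predicted to evacuate within that
    //        slice; if none fit, prepare_failed() is true, the loop aborts,
    //        and the remaining optional regions are left for a future pause.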
3854 
3855 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3856   // Also cleans the card table of temporary duplicate detection information used
3857   // during UpdateRS/ScanRS.
3858   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
3859 
3860   // Process any discovered reference objects - we have
3861   // to do this _before_ we retire the GC alloc regions
3862   // as we may have to copy some 'reachable' referent
3863   // objects (and their reachable sub-graphs) that were
3864   // not copied during the pause.
3865   process_discovered_references(per_thread_states);
3866 
3867   // FIXME
3868   // CM's reference processing also cleans up the string table.
3869   // Should we do that here also? We could, but it is a serial operation
3870   // and could significantly increase the pause time.
3871 
3872   G1STWIsAliveClosure is_alive(this);

