1071 hrrs->clear();
1072 // You might think here that we could clear just the cards
1073 // corresponding to the used region. But no: if we leave a dirty card
1074 // in a region we might allocate into, then it would prevent that card
1075 // from being enqueued, and cause it to be missed.
1076 // Re: the performance cost: we shouldn't be doing full GC anyway!
1077 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1078
1079 return false;
1080 }
1081 };
1082
1083 void G1CollectedHeap::clear_rsets_post_compaction() {
1084 PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1085 heap_region_iterate(&rs_clear);
1086 }
1087
1088 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1089 G1CollectedHeap* _g1h;
1090 UpdateRSOopClosure _cl;
1091 int _worker_i;
1092 public:
1093 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1094 _cl(g1->g1_rem_set(), worker_i),
1095 _worker_i(worker_i),
1096 _g1h(g1)
1097 { }
1098
1099 bool doHeapRegion(HeapRegion* r) {
1100 if (!r->is_continues_humongous()) {
1101 _cl.set_from(r);
1102 r->oop_iterate(&_cl);
1103 }
1104 return false;
1105 }
1106 };
1107
1108 class ParRebuildRSTask: public AbstractGangTask {
1109 G1CollectedHeap* _g1;
1110 HeapRegionClaimer _hrclaimer;
1111
1112 public:
1113 ParRebuildRSTask(G1CollectedHeap* g1) :
3056 }
3057
3058 bool failures = rootsCl.failures() || codeRootsCl.failures();
3059
3060 if (vo != VerifyOption_G1UseMarkWord) {
3061 // If we're verifying during a full GC then the region sets
3062 // will have been torn down at the start of the GC. Therefore
3063 // verifying the region sets will fail. So we only verify
3064 // the region sets when not in a full GC.
3065 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3066 verify_region_sets();
3067 }
3068
3069 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3070 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3071
3072 G1ParVerifyTask task(this, vo);
3073 assert(UseDynamicNumberOfGCThreads ||
3074 workers()->active_workers() == workers()->total_workers(),
3075 "If not dynamic should be using all the workers");
3076 int n_workers = workers()->active_workers();
3077 set_par_threads(n_workers);
3078 workers()->run_task(&task);
3079 set_par_threads(0);
3080 if (task.failures()) {
3081 failures = true;
3082 }
3083
3084 } else {
3085 VerifyRegionClosure blk(false, vo);
3086 heap_region_iterate(&blk);
3087 if (blk.failures()) {
3088 failures = true;
3089 }
3090 }
3091
3092 if (G1StringDedup::is_enabled()) {
3093 if (!silent) gclog_or_tty->print("StrDedup ");
3094 G1StringDedup::verify();
3095 }
3096
3594 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3595
3596 // Here's a good place to add any other checks we'd like to
3597 // perform on CSet regions.
3598 return false;
3599 }
3600 };
3601 #endif // ASSERT
3602
3603 #if TASKQUEUE_STATS
// Prints the two header lines for the per-thread task queue statistics
// table emitted by print_taskqueue_stats(). Compiled only when
// TASKQUEUE_STATS is enabled.
void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}
3609
3610 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3611 print_taskqueue_stats_hdr(st);
3612
3613 TaskQueueStats totals;
3614 const int n = workers()->total_workers();
3615 for (int i = 0; i < n; ++i) {
3616 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3617 totals += task_queue(i)->stats;
3618 }
3619 st->print_raw("tot "); totals.print(st); st->cr();
3620
3621 DEBUG_ONLY(totals.verify());
3622 }
3623
3624 void G1CollectedHeap::reset_taskqueue_stats() {
3625 const int n = workers()->total_workers();
3626 for (int i = 0; i < n; ++i) {
3627 task_queue(i)->stats.reset();
3628 }
3629 }
3630 #endif // TASKQUEUE_STATS
3631
// Emits the opening part of the "[GC pause ..." log line: a log stamp plus
// the pause kind — "(young)" or "(mixed)", with " (initial-mark)" appended
// when this pause also starts a concurrent marking cycle. The rest of the
// line is presumably completed by log_gc_footer() — confirm.
void G1CollectedHeap::log_gc_header() {
  if (!G1Log::fine()) {
    return; // below "fine" log verbosity: print nothing
  }

  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());

  GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
    .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
    .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");

  gclog_or_tty->print("[%s", (const char*)gc_cause_str);
}
3645
3646 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
4353 Mutex* stats_lock() { return &_stats_lock; }
4354
4355 public:
  // Constructs the main G1 evacuation-pause gang task.
  // Note: _terminator is created with 0 workers here; the actual active
  // worker count is installed later via set_for_termination().
  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
    : AbstractGangTask("G1 collection"),
      _g1h(g1h),
      _queues(task_queues),
      _root_processor(root_processor),
      _terminator(0, _queues),
      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  {}
4364
  // The set of reference-to-scan queues shared by all workers.
  RefToScanQueueSet* queues() { return _queues; }

  // Returns the scan queue belonging to worker i.
  RefToScanQueue *work_queue(int i) {
    return queues()->queue(i);
  }

  // Termination protocol object used by the worker gang.
  ParallelTaskTerminator* terminator() { return &_terminator; }
4372
4373 virtual void set_for_termination(int active_workers) {
4374 _root_processor->set_num_workers(active_workers);
4375 terminator()->reset_for_reuse(active_workers);
4376 _n_workers = active_workers;
4377 }
4378
4379 // Helps out with CLD processing.
4380 //
4381 // During InitialMark we need to:
4382 // 1) Scavenge all CLDs for the young GC.
4383 // 2) Mark all objects directly reachable from strong CLDs.
4384 template <G1Mark do_mark_object>
4385 class G1CLDClosure : public CLDClosure {
4386 G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
4387 G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
4388 G1KlassScanClosure _klass_in_cld_closure;
4389 bool _claim;
4390
4391 public:
4392 G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4393 bool only_young, bool claim)
5033 _g1h(g1h),
5034 _par_scan_state(pss)
5035 { }
5036
5037 void do_void() {
5038 G1ParScanThreadState* const pss = par_scan_state();
5039 pss->trim_queue();
5040 }
5041 };
5042
5043 // Parallel Reference Processing closures
5044
5045 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5046 // processing during G1 evacuation pauses.
5047
5048 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5049 private:
5050 G1CollectedHeap* _g1h;
5051 RefToScanQueueSet* _queues;
5052 FlexibleWorkGang* _workers;
5053 int _active_workers;
5054
5055 public:
5056 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5057 FlexibleWorkGang* workers,
5058 RefToScanQueueSet *task_queues,
5059 int n_workers) :
5060 _g1h(g1h),
5061 _queues(task_queues),
5062 _workers(workers),
5063 _active_workers(n_workers)
5064 {
5065 assert(n_workers > 0, "shouldn't call this otherwise");
5066 }
5067
5068 // Executes the given task using concurrent marking worker threads.
5069 virtual void execute(ProcessTask& task);
5070 virtual void execute(EnqueueTask& task);
5071 };
5072
5073 // Gang task for possibly parallel reference processing
5074
5075 class G1STWRefProcTaskProxy: public AbstractGangTask {
5076 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5077 ProcessTask& _proc_task;
5078 G1CollectedHeap* _g1h;
5079 RefToScanQueueSet *_task_queues;
5172
5173 _g1h->set_par_threads(_active_workers);
5174 _workers->run_task(&enq_task_proxy);
5175 _g1h->set_par_threads(0);
5176 }
5177
5178 // End of weak reference support closures
5179
5180 // Abstract task used to preserve (i.e. copy) any referent objects
5181 // that are in the collection set and are pointed to by reference
5182 // objects discovered by the CM ref processor.
5183
5184 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5185 protected:
5186 G1CollectedHeap* _g1h;
5187 RefToScanQueueSet *_queues;
5188 ParallelTaskTerminator _terminator;
5189 uint _n_workers;
5190
5191 public:
5192 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5193 AbstractGangTask("ParPreserveCMReferents"),
5194 _g1h(g1h),
5195 _queues(task_queues),
5196 _terminator(workers, _queues),
5197 _n_workers(workers)
5198 { }
5199
5200 void work(uint worker_id) {
5201 ResourceMark rm;
5202 HandleMark hm;
5203
5204 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5205 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5206
5207 pss.set_evac_failure_closure(&evac_failure_cl);
5208
5209 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5210
5211 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5212
|
1071 hrrs->clear();
1072 // You might think here that we could clear just the cards
1073 // corresponding to the used region. But no: if we leave a dirty card
1074 // in a region we might allocate into, then it would prevent that card
1075 // from being enqueued, and cause it to be missed.
1076 // Re: the performance cost: we shouldn't be doing full GC anyway!
1077 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1078
1079 return false;
1080 }
1081 };
1082
// Wipes every region's remembered set (and the matching card-table range)
// after a full-GC compaction, by applying PostMCRemSetClearClosure to all
// heap regions.
void G1CollectedHeap::clear_rsets_post_compaction() {
  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
  heap_region_iterate(&rs_clear);
}
1087
// Heap-region closure that rebuilds remembered-set contributions by
// iterating over all oops in a region and feeding them to an
// UpdateRSOopClosure for the given worker.
class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  UpdateRSOopClosure _cl;
  uint _worker_i;  // id of the worker this closure runs on behalf of
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
    _cl(g1->g1_rem_set(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }

  bool doHeapRegion(HeapRegion* r) {
    // Skip continuation regions of humongous objects; presumably those are
    // covered when their "starts humongous" region is processed — confirm.
    if (!r->is_continues_humongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false; // never abort the region iteration
  }
};
1107
1108 class ParRebuildRSTask: public AbstractGangTask {
1109 G1CollectedHeap* _g1;
1110 HeapRegionClaimer _hrclaimer;
1111
1112 public:
1113 ParRebuildRSTask(G1CollectedHeap* g1) :
3056 }
3057
3058 bool failures = rootsCl.failures() || codeRootsCl.failures();
3059
3060 if (vo != VerifyOption_G1UseMarkWord) {
3061 // If we're verifying during a full GC then the region sets
3062 // will have been torn down at the start of the GC. Therefore
3063 // verifying the region sets will fail. So we only verify
3064 // the region sets when not in a full GC.
3065 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3066 verify_region_sets();
3067 }
3068
3069 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3070 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3071
3072 G1ParVerifyTask task(this, vo);
3073 assert(UseDynamicNumberOfGCThreads ||
3074 workers()->active_workers() == workers()->total_workers(),
3075 "If not dynamic should be using all the workers");
3076 uint n_workers = workers()->active_workers();
3077 set_par_threads(n_workers);
3078 workers()->run_task(&task);
3079 set_par_threads(0);
3080 if (task.failures()) {
3081 failures = true;
3082 }
3083
3084 } else {
3085 VerifyRegionClosure blk(false, vo);
3086 heap_region_iterate(&blk);
3087 if (blk.failures()) {
3088 failures = true;
3089 }
3090 }
3091
3092 if (G1StringDedup::is_enabled()) {
3093 if (!silent) gclog_or_tty->print("StrDedup ");
3094 G1StringDedup::verify();
3095 }
3096
3594 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3595
3596 // Here's a good place to add any other checks we'd like to
3597 // perform on CSet regions.
3598 return false;
3599 }
3600 };
3601 #endif // ASSERT
3602
3603 #if TASKQUEUE_STATS
// Prints the two header lines for the per-thread task queue statistics
// table emitted by print_taskqueue_stats(). Compiled only when
// TASKQUEUE_STATS is enabled.
void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}
3609
3610 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3611 print_taskqueue_stats_hdr(st);
3612
3613 TaskQueueStats totals;
3614 const uint n = workers()->total_workers();
3615 for (uint i = 0; i < n; ++i) {
3616 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3617 totals += task_queue(i)->stats;
3618 }
3619 st->print_raw("tot "); totals.print(st); st->cr();
3620
3621 DEBUG_ONLY(totals.verify());
3622 }
3623
3624 void G1CollectedHeap::reset_taskqueue_stats() {
3625 const uint n = workers()->total_workers();
3626 for (uint i = 0; i < n; ++i) {
3627 task_queue(i)->stats.reset();
3628 }
3629 }
3630 #endif // TASKQUEUE_STATS
3631
// Emits the opening part of the "[GC pause ..." log line: a log stamp plus
// the pause kind — "(young)" or "(mixed)", with " (initial-mark)" appended
// when this pause also starts a concurrent marking cycle. The rest of the
// line is presumably completed by log_gc_footer() — confirm.
void G1CollectedHeap::log_gc_header() {
  if (!G1Log::fine()) {
    return; // below "fine" log verbosity: print nothing
  }

  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());

  GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
    .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
    .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");

  gclog_or_tty->print("[%s", (const char*)gc_cause_str);
}
3645
3646 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
4353 Mutex* stats_lock() { return &_stats_lock; }
4354
4355 public:
  // Constructs the main G1 evacuation-pause gang task.
  // Note: _terminator is created with 0 workers here; the actual active
  // worker count is installed later via set_for_termination().
  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
    : AbstractGangTask("G1 collection"),
      _g1h(g1h),
      _queues(task_queues),
      _root_processor(root_processor),
      _terminator(0, _queues),
      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  {}
4364
  // The set of reference-to-scan queues shared by all workers.
  RefToScanQueueSet* queues() { return _queues; }

  // Returns the scan queue belonging to worker i.
  RefToScanQueue *work_queue(int i) {
    return queues()->queue(i);
  }

  // Termination protocol object used by the worker gang.
  ParallelTaskTerminator* terminator() { return &_terminator; }
4372
  // Records the number of active workers and propagates it to the root
  // processor and the termination protocol.
  virtual void set_for_termination(uint active_workers) {
    _root_processor->set_num_workers(active_workers);
    terminator()->reset_for_reuse(active_workers);
    _n_workers = active_workers;
  }
4378
4379 // Helps out with CLD processing.
4380 //
4381 // During InitialMark we need to:
4382 // 1) Scavenge all CLDs for the young GC.
4383 // 2) Mark all objects directly reachable from strong CLDs.
4384 template <G1Mark do_mark_object>
4385 class G1CLDClosure : public CLDClosure {
4386 G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
4387 G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
4388 G1KlassScanClosure _klass_in_cld_closure;
4389 bool _claim;
4390
4391 public:
4392 G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4393 bool only_young, bool claim)
5033 _g1h(g1h),
5034 _par_scan_state(pss)
5035 { }
5036
5037 void do_void() {
5038 G1ParScanThreadState* const pss = par_scan_state();
5039 pss->trim_queue();
5040 }
5041 };
5042
5043 // Parallel Reference Processing closures
5044
5045 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5046 // processing during G1 evacuation pauses.
5047
// Implementation of AbstractRefProcTaskExecutor for parallel reference
// processing during G1 evacuation pauses: runs the reference processor's
// Process/Enqueue tasks on the given work gang.
class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  RefToScanQueueSet* _queues;
  FlexibleWorkGang* _workers;
  uint _active_workers;  // number of workers the tasks will be run with

public:
  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
                           FlexibleWorkGang* workers,
                           RefToScanQueueSet *task_queues,
                           uint n_workers) :
    _g1h(g1h),
    _queues(task_queues),
    _workers(workers),
    _active_workers(n_workers)
  {
    assert(n_workers > 0, "shouldn't call this otherwise");
  }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
5072
5073 // Gang task for possibly parallel reference processing
5074
5075 class G1STWRefProcTaskProxy: public AbstractGangTask {
5076 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5077 ProcessTask& _proc_task;
5078 G1CollectedHeap* _g1h;
5079 RefToScanQueueSet *_task_queues;
5172
5173 _g1h->set_par_threads(_active_workers);
5174 _workers->run_task(&enq_task_proxy);
5175 _g1h->set_par_threads(0);
5176 }
5177
5178 // End of weak reference support closures
5179
5180 // Abstract task used to preserve (i.e. copy) any referent objects
5181 // that are in the collection set and are pointed to by reference
5182 // objects discovered by the CM ref processor.
5183
5184 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5185 protected:
5186 G1CollectedHeap* _g1h;
5187 RefToScanQueueSet *_queues;
5188 ParallelTaskTerminator _terminator;
5189 uint _n_workers;
5190
5191 public:
5192 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, uint workers, RefToScanQueueSet *task_queues) :
5193 AbstractGangTask("ParPreserveCMReferents"),
5194 _g1h(g1h),
5195 _queues(task_queues),
5196 _terminator(workers, _queues),
5197 _n_workers(workers)
5198 { }
5199
5200 void work(uint worker_id) {
5201 ResourceMark rm;
5202 HandleMark hm;
5203
5204 G1ParScanThreadState pss(_g1h, worker_id, NULL);
5205 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5206
5207 pss.set_evac_failure_closure(&evac_failure_cl);
5208
5209 assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5210
5211 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5212
|