227 set_by_index(region, value);
228 }
229 bool is_candidate(uint region) {
230 return get_by_index(region);
231 }
232 };
233
  // Per-region candidate flags used by eager reclaim of humongous
  // objects (queried via is_candidate() above).
  HumongousReclaimCandidates _humongous_reclaim_candidates;
  // Records whether, during humongous object registration, any candidate
  // regions were found. If not, we can skip a few steps.
  bool _has_humongous_reclaim_candidates;

  // NOTE(review): presumably a stamp advanced at GC boundaries and read
  // concurrently (hence volatile) — confirm against the users of this field.
  volatile unsigned _gc_time_stamp;

  // Helper for printing heap region (HR) events; see G1HRPrinter.
  G1HRPrinter _hr_printer;
242
  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
  // (d) cause == _g1_humongous_allocation.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
251
  // Indicates whether we are in young or mixed GC mode.
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  // NOTE(review): presumably true once the heap summary event for the
  // current GC has been sent — confirm with the code that sets/clears it.
  bool _heap_summary_sent;
264
265 // This is a non-product method that is helpful for testing. It is
266 // called at the end of a GC and artificially expands the heap by
267 // allocating a number of dead regions. This way we can induce very
268 // frequent marking cycles and stress the cleanup / concurrent
269 // cleanup code more (as all the regions that will be allocated by
  // Register the given region (by region index) to be part of the
  // collection set.
  inline void register_humongous_region_with_cset(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.
  void register_humongous_regions_with_cset();
569 // We register a region with the fast "in collection set" test. We
570 // simply set to true the array slot corresponding to this region.
571 void register_young_region_with_cset(HeapRegion* r) {
572 _in_cset_fast_test.set_in_young(r->hrm_index());
573 }
574 void register_old_region_with_cset(HeapRegion* r) {
575 _in_cset_fast_test.set_in_old(r->hrm_index());
576 }
  // Remove the given region's entry from the fast "in collection set"
  // membership table.
  void clear_in_cset(const HeapRegion* hr) {
    _in_cset_fast_test.clear(hr);
  }
580
  // Reset the entire fast "in collection set" membership table.
  void clear_cset_fast_test() {
    _in_cset_fast_test.clear();
  }
584
585 // This is called at the start of either a concurrent cycle or a Full
586 // GC to update the number of old marking cycles started.
587 void increment_old_marking_cycles_started();
588
589 // This is called at the end of either a concurrent cycle or a Full
590 // GC to update the number of old marking cycles completed. Those two
591 // can happen in a nested fashion, i.e., we start a concurrent
592 // cycle, a Full GC happens half-way through it which ends first,
593 // and then the cycle notices that a Full GC happened and ends
594 // too. The concurrent parameter is a boolean to help us do a bit
595 // tighter consistency checking in the method. If concurrent is
596 // false, the caller is the inner caller in the nesting (i.e., the
597 // Full GC). If concurrent is true, the caller is the outer caller
598 // in this nesting (i.e., the concurrent cycle). Further nesting is
599 // not currently supported. The end of this call also notifies
600 // the FullGCCount_lock in case a Java thread is waiting for a full
601 // GC to happen (e.g., it called System.gc() with
602 // +ExplicitGCInvokesConcurrent).
603 void increment_old_marking_cycles_completed(bool concurrent);
|
227 set_by_index(region, value);
228 }
229 bool is_candidate(uint region) {
230 return get_by_index(region);
231 }
232 };
233
  // Per-region candidate flags used by eager reclaim of humongous
  // objects (queried via is_candidate() above).
  HumongousReclaimCandidates _humongous_reclaim_candidates;
  // Records whether, during humongous object registration, any candidate
  // regions were found. If not, we can skip a few steps.
  bool _has_humongous_reclaim_candidates;

  // NOTE(review): presumably a stamp advanced at GC boundaries and read
  // concurrently (hence volatile) — confirm against the users of this field.
  volatile unsigned _gc_time_stamp;

  // Helper for printing heap region (HR) events; see G1HRPrinter.
  G1HRPrinter _hr_printer;
242
  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  // (b) cause == _g1_humongous_allocation
  // The following causes are listed in user_requested_concurrent_full_gc()
  // because user-requested GCs sometimes need different treatment:
  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
  // (e) cause == _update_allocation_context_stats_inc
  // (f) cause == _wb_conc_mark
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
255
  // Indicates whether we are in young or mixed GC mode.
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  // NOTE(review): presumably true once the heap summary event for the
  // current GC has been sent — confirm with the code that sets/clears it.
  bool _heap_summary_sent;
268
269 // This is a non-product method that is helpful for testing. It is
270 // called at the end of a GC and artificially expands the heap by
271 // allocating a number of dead regions. This way we can induce very
272 // frequent marking cycles and stress the cleanup / concurrent
273 // cleanup code more (as all the regions that will be allocated by
  // Register the given region (by region index) to be part of the
  // collection set.
  inline void register_humongous_region_with_cset(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.
  void register_humongous_regions_with_cset();
573 // We register a region with the fast "in collection set" test. We
574 // simply set to true the array slot corresponding to this region.
575 void register_young_region_with_cset(HeapRegion* r) {
576 _in_cset_fast_test.set_in_young(r->hrm_index());
577 }
578 void register_old_region_with_cset(HeapRegion* r) {
579 _in_cset_fast_test.set_in_old(r->hrm_index());
580 }
  // Remove the given region's entry from the fast "in collection set"
  // membership table.
  void clear_in_cset(const HeapRegion* hr) {
    _in_cset_fast_test.clear(hr);
  }
584
  // Reset the entire fast "in collection set" membership table.
  void clear_cset_fast_test() {
    _in_cset_fast_test.clear();
  }
588
  // A complement to should_do_concurrent_full_gc. GCs caused
  // by a user sometimes need to be treated differently
  // from those caused by the VM.
  bool user_requested_concurrent_full_gc(GCCause::Cause cause);
593
594 // This is called at the start of either a concurrent cycle or a Full
595 // GC to update the number of old marking cycles started.
596 void increment_old_marking_cycles_started();
597
598 // This is called at the end of either a concurrent cycle or a Full
599 // GC to update the number of old marking cycles completed. Those two
600 // can happen in a nested fashion, i.e., we start a concurrent
601 // cycle, a Full GC happens half-way through it which ends first,
602 // and then the cycle notices that a Full GC happened and ends
603 // too. The concurrent parameter is a boolean to help us do a bit
604 // tighter consistency checking in the method. If concurrent is
605 // false, the caller is the inner caller in the nesting (i.e., the
606 // Full GC). If concurrent is true, the caller is the outer caller
607 // in this nesting (i.e., the concurrent cycle). Further nesting is
608 // not currently supported. The end of this call also notifies
609 // the FullGCCount_lock in case a Java thread is waiting for a full
610 // GC to happen (e.g., it called System.gc() with
611 // +ExplicitGCInvokesConcurrent).
612 void increment_old_marking_cycles_completed(bool concurrent);
|