228 set_by_index(region, value);
229 }
// Returns whether the region with the given index is currently flagged
// as a reclaim candidate (reads the per-region entry via get_by_index).
bool is_candidate(uint region) {
  return get_by_index(region);
}
233 };
234
235 HumongousReclaimCandidates _humongous_reclaim_candidates;
// Records whether any candidate regions were found during humongous
// object registration. If none were found, a few steps can be skipped.
238 bool _has_humongous_reclaim_candidates;
239
240 volatile unsigned _gc_time_stamp;
241
242 G1HRPrinter _hr_printer;
243
// It decides whether an explicit GC should start a concurrent cycle
// instead of doing a STW GC. Currently, a concurrent cycle is
// explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
// (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
// (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
// (d) cause == _g1_humongous_allocation.
251 bool should_do_concurrent_full_gc(GCCause::Cause cause);
252
// Indicates whether we are in young or mixed GC mode.
254 G1CollectorState _collector_state;
255
256 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
257 // concurrent cycles) we have started.
258 volatile uint _old_marking_cycles_started;
259
260 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
261 // concurrent cycles) we have completed.
262 volatile uint _old_marking_cycles_completed;
263
264 bool _heap_summary_sent;
265
266 // This is a non-product method that is helpful for testing. It is
267 // called at the end of a GC and artificially expands the heap by
268 // allocating a number of dead regions. This way we can induce very
269 // frequent marking cycles and stress the cleanup / concurrent
270 // cleanup code more (as all the regions that will be allocated by
561 // Register the given region to be part of the collection set.
562 inline void register_humongous_region_with_cset(uint index);
563 // Register regions with humongous objects (actually on the start region) in
564 // the in_cset_fast_test table.
565 void register_humongous_regions_with_cset();
// Register a young region with the fast "in collection set" test:
// mark the table slot for this region (indexed by hrm_index) as in-young.
void register_young_region_with_cset(HeapRegion* r) {
  _in_cset_fast_test.set_in_young(r->hrm_index());
}
// Register an old region with the fast "in collection set" test:
// mark the table slot for this region (indexed by hrm_index) as in-old.
void register_old_region_with_cset(HeapRegion* r) {
  _in_cset_fast_test.set_in_old(r->hrm_index());
}
// Remove the given region from the fast "in collection set" test
// by clearing its entry in _in_cset_fast_test.
void clear_in_cset(const HeapRegion* hr) {
  _in_cset_fast_test.clear(hr);
}
577
// Reset the entire fast "in collection set" test table.
void clear_cset_fast_test() {
  _in_cset_fast_test.clear();
}
581
582 // This is called at the start of either a concurrent cycle or a Full
583 // GC to update the number of old marking cycles started.
584 void increment_old_marking_cycles_started();
585
586 // This is called at the end of either a concurrent cycle or a Full
587 // GC to update the number of old marking cycles completed. Those two
588 // can happen in a nested fashion, i.e., we start a concurrent
589 // cycle, a Full GC happens half-way through it which ends first,
590 // and then the cycle notices that a Full GC happened and ends
591 // too. The concurrent parameter is a boolean to help us do a bit
592 // tighter consistency checking in the method. If concurrent is
593 // false, the caller is the inner caller in the nesting (i.e., the
594 // Full GC). If concurrent is true, the caller is the outer caller
595 // in this nesting (i.e., the concurrent cycle). Further nesting is
596 // not currently supported. The end of this call also notifies
597 // the FullGCCount_lock in case a Java thread is waiting for a full
598 // GC to happen (e.g., it called System.gc() with
599 // +ExplicitGCInvokesConcurrent).
600 void increment_old_marking_cycles_completed(bool concurrent);
|
228 set_by_index(region, value);
229 }
// Returns whether the region with the given index is currently flagged
// as a reclaim candidate (reads the per-region entry via get_by_index).
bool is_candidate(uint region) {
  return get_by_index(region);
}
233 };
234
235 HumongousReclaimCandidates _humongous_reclaim_candidates;
// Records whether any candidate regions were found during humongous
// object registration. If none were found, a few steps can be skipped.
238 bool _has_humongous_reclaim_candidates;
239
240 volatile unsigned _gc_time_stamp;
241
242 G1HRPrinter _hr_printer;
243
// It decides whether an explicit GC should start a concurrent cycle
// instead of doing a STW GC. Currently, a concurrent cycle is
// explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
// (b) cause == _g1_humongous_allocation,
// (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
// (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
// (e) cause == _update_allocation_context_stats_inc, or
// (f) cause == _wb_conc_mark.
253 bool should_do_concurrent_full_gc(GCCause::Cause cause);
254
// Indicates whether we are in young or mixed GC mode.
256 G1CollectorState _collector_state;
257
258 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
259 // concurrent cycles) we have started.
260 volatile uint _old_marking_cycles_started;
261
262 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
263 // concurrent cycles) we have completed.
264 volatile uint _old_marking_cycles_completed;
265
266 bool _heap_summary_sent;
267
268 // This is a non-product method that is helpful for testing. It is
269 // called at the end of a GC and artificially expands the heap by
270 // allocating a number of dead regions. This way we can induce very
271 // frequent marking cycles and stress the cleanup / concurrent
272 // cleanup code more (as all the regions that will be allocated by
563 // Register the given region to be part of the collection set.
564 inline void register_humongous_region_with_cset(uint index);
565 // Register regions with humongous objects (actually on the start region) in
566 // the in_cset_fast_test table.
567 void register_humongous_regions_with_cset();
// Register a young region with the fast "in collection set" test:
// mark the table slot for this region (indexed by hrm_index) as in-young.
void register_young_region_with_cset(HeapRegion* r) {
  _in_cset_fast_test.set_in_young(r->hrm_index());
}
// Register an old region with the fast "in collection set" test:
// mark the table slot for this region (indexed by hrm_index) as in-old.
void register_old_region_with_cset(HeapRegion* r) {
  _in_cset_fast_test.set_in_old(r->hrm_index());
}
// Remove the given region from the fast "in collection set" test
// by clearing its entry in _in_cset_fast_test.
void clear_in_cset(const HeapRegion* hr) {
  _in_cset_fast_test.clear(hr);
}
579
// Reset the entire fast "in collection set" test table.
void clear_cset_fast_test() {
  _in_cset_fast_test.clear();
}
583
584 bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
585
586 // This is called at the start of either a concurrent cycle or a Full
587 // GC to update the number of old marking cycles started.
588 void increment_old_marking_cycles_started();
589
590 // This is called at the end of either a concurrent cycle or a Full
591 // GC to update the number of old marking cycles completed. Those two
592 // can happen in a nested fashion, i.e., we start a concurrent
593 // cycle, a Full GC happens half-way through it which ends first,
594 // and then the cycle notices that a Full GC happened and ends
595 // too. The concurrent parameter is a boolean to help us do a bit
596 // tighter consistency checking in the method. If concurrent is
597 // false, the caller is the inner caller in the nesting (i.e., the
598 // Full GC). If concurrent is true, the caller is the outer caller
599 // in this nesting (i.e., the concurrent cycle). Further nesting is
600 // not currently supported. The end of this call also notifies
601 // the FullGCCount_lock in case a Java thread is waiting for a full
602 // GC to happen (e.g., it called System.gc() with
603 // +ExplicitGCInvokesConcurrent).
604 void increment_old_marking_cycles_completed(bool concurrent);
|