227 set_by_index(region, value);
228 }
229 bool is_candidate(uint region) {
230 return get_by_index(region);
231 }
232 };
233
// Per-region table of humongous regions that may be eagerly reclaimed.
HumongousReclaimCandidates _humongous_reclaim_candidates;
// Stores whether during humongous object registration we found candidate regions.
// If not, we can skip a few steps.
bool _has_humongous_reclaim_candidates;

// NOTE(review): presumably bumped per GC to invalidate cached per-region
// time stamps — confirm at use sites.
volatile unsigned _gc_time_stamp;

// Printer for heap-region events — see G1HRPrinter for what it reports.
G1HRPrinter _hr_printer;

// Decides whether an explicit GC should start a concurrent cycle
// instead of doing a STW GC. Currently, a concurrent cycle is
// explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
// (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
// (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
// (d) cause == _g1_humongous_allocation.
bool should_do_concurrent_full_gc(GCCause::Cause cause);

// Indicates whether we are in young or mixed GC mode.
G1CollectorState _collector_state;

// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
volatile uint _old_marking_cycles_started;

// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have completed.
volatile uint _old_marking_cycles_completed;

// NOTE(review): name suggests this tracks whether the heap summary event
// was already emitted for the current cycle — confirm at use sites.
bool _heap_summary_sent;

// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
// allocating a number of dead regions. This way we can induce very
// frequent marking cycles and stress the cleanup / concurrent
// cleanup code more (as all the regions that will be allocated by
// Register the humongous region with the given index to be part of the
// collection set.
inline void register_humongous_region_with_cset(uint index);
// Register regions with humongous objects (actually on the start region) in
// the in_cset_fast_test table.
void register_humongous_regions_with_cset();
563 // We register a region with the fast "in collection set" test. We
564 // simply set to true the array slot corresponding to this region.
565 void register_young_region_with_cset(HeapRegion* r) {
566 _in_cset_fast_test.set_in_young(r->hrm_index());
567 }
568 void register_old_region_with_cset(HeapRegion* r) {
569 _in_cset_fast_test.set_in_old(r->hrm_index());
570 }
// Remove the given region from the fast "in collection set" test structure.
void clear_in_cset(const HeapRegion* hr) {
  _in_cset_fast_test.clear(hr);
}
574
// Reset the whole fast "in collection set" test structure.
void clear_cset_fast_test() {
  _in_cset_fast_test.clear();
}
578
// This is called at the start of either a concurrent cycle or a Full
// GC to update the number of old marking cycles started.
void increment_old_marking_cycles_started();

// This is called at the end of either a concurrent cycle or a Full
// GC to update the number of old marking cycles completed. Those two
// can happen in a nested fashion, i.e., we start a concurrent
// cycle, a Full GC happens half-way through it which ends first,
// and then the cycle notices that a Full GC happened and ends
// too. The concurrent parameter is a boolean to help us do a bit
// tighter consistency checking in the method. If concurrent is
// false, the caller is the inner caller in the nesting (i.e., the
// Full GC). If concurrent is true, the caller is the outer caller
// in this nesting (i.e., the concurrent cycle). Further nesting is
// not currently supported. The end of this call also notifies
// the FullGCCount_lock in case a Java thread is waiting for a full
// GC to happen (e.g., it called System.gc() with
// +ExplicitGCInvokesConcurrent).
void increment_old_marking_cycles_completed(bool concurrent);
|
227 set_by_index(region, value);
228 }
229 bool is_candidate(uint region) {
230 return get_by_index(region);
231 }
232 };
233
// Per-region table of humongous regions that may be eagerly reclaimed.
HumongousReclaimCandidates _humongous_reclaim_candidates;
// Stores whether during humongous object registration we found candidate regions.
// If not, we can skip a few steps.
bool _has_humongous_reclaim_candidates;

// NOTE(review): presumably bumped per GC to invalidate cached per-region
// time stamps — confirm at use sites.
volatile unsigned _gc_time_stamp;

// Printer for heap-region events — see G1HRPrinter for what it reports.
G1HRPrinter _hr_printer;

// Decides whether an explicit GC should start a concurrent cycle
// instead of doing a STW GC. Currently, a concurrent cycle is
// explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
// (b) cause == _g1_humongous_allocation, or
// (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
// (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
// (e) cause == _update_allocation_context_stats_inc, or
// (f) cause == _wb_conc_mark.
bool should_do_concurrent_full_gc(GCCause::Cause cause);

// Indicates whether we are in young or mixed GC mode.
G1CollectorState _collector_state;

// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
volatile uint _old_marking_cycles_started;

// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have completed.
volatile uint _old_marking_cycles_completed;

// NOTE(review): name suggests this tracks whether the heap summary event
// was already emitted for the current cycle — confirm at use sites.
bool _heap_summary_sent;

// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
// allocating a number of dead regions. This way we can induce very
// frequent marking cycles and stress the cleanup / concurrent
// cleanup code more (as all the regions that will be allocated by
// Register the humongous region with the given index to be part of the
// collection set.
inline void register_humongous_region_with_cset(uint index);
// Register regions with humongous objects (actually on the start region) in
// the in_cset_fast_test table.
void register_humongous_regions_with_cset();
565 // We register a region with the fast "in collection set" test. We
566 // simply set to true the array slot corresponding to this region.
567 void register_young_region_with_cset(HeapRegion* r) {
568 _in_cset_fast_test.set_in_young(r->hrm_index());
569 }
570 void register_old_region_with_cset(HeapRegion* r) {
571 _in_cset_fast_test.set_in_old(r->hrm_index());
572 }
// Remove the given region from the fast "in collection set" test structure.
void clear_in_cset(const HeapRegion* hr) {
  _in_cset_fast_test.clear(hr);
}
576
// Reset the whole fast "in collection set" test structure.
void clear_cset_fast_test() {
  _in_cset_fast_test.clear();
}
580
// NOTE(review): inferred from the name — returns whether the given cause is
// an explicit, user-requested GC that should run as a concurrent cycle;
// confirm against the definition.
bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);

// This is called at the start of either a concurrent cycle or a Full
// GC to update the number of old marking cycles started.
void increment_old_marking_cycles_started();

// This is called at the end of either a concurrent cycle or a Full
// GC to update the number of old marking cycles completed. Those two
// can happen in a nested fashion, i.e., we start a concurrent
// cycle, a Full GC happens half-way through it which ends first,
// and then the cycle notices that a Full GC happened and ends
// too. The concurrent parameter is a boolean to help us do a bit
// tighter consistency checking in the method. If concurrent is
// false, the caller is the inner caller in the nesting (i.e., the
// Full GC). If concurrent is true, the caller is the outer caller
// in this nesting (i.e., the concurrent cycle). Further nesting is
// not currently supported. The end of this call also notifies
// the FullGCCount_lock in case a Java thread is waiting for a full
// GC to happen (e.g., it called System.gc() with
// +ExplicitGCInvokesConcurrent).
void increment_old_marking_cycles_completed(bool concurrent);
|