304 HumongousIsLiveBiasedMappedArray _humongous_is_live;
305 // Stores whether, during humongous object registration, we found candidate regions.
306 // If not, we can skip a few steps.
307 bool _has_humongous_reclaim_candidates;
308
309 volatile unsigned _gc_time_stamp;
310
311 size_t* _surviving_young_words;
312
313 G1HRPrinter _hr_printer;
314
315 void setup_surviving_young_words();
316 void update_surviving_young_words(size_t* surv_young_words);
317 void cleanup_surviving_young_words();
318
319 // It decides whether an explicit GC should start a concurrent cycle
320 // instead of doing a STW GC. Currently, a concurrent cycle is
321 // explicitly started if:
322 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
323 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
324 // (c) cause == _g1_humongous_allocation
325 bool should_do_concurrent_full_gc(GCCause::Cause cause);
326
327 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
328 // concurrent cycles) we have started.
329 volatile uint _old_marking_cycles_started;
330
331 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
332 // concurrent cycles) we have completed.
333 volatile uint _old_marking_cycles_completed;
334
335 bool _concurrent_cycle_started;
336 bool _heap_summary_sent;
337
338 // This is a non-product method that is helpful for testing. It is
339 // called at the end of a GC and artificially expands the heap by
340 // allocating a number of dead regions. This way we can induce very
341 // frequent marking cycles and stress the cleanup / concurrent
342 // cleanup code more (as all the regions that will be allocated by
343 // this method will be found dead by the marking cycle).
344 void allocate_dummy_regions() PRODUCT_RETURN;
|
304 HumongousIsLiveBiasedMappedArray _humongous_is_live;
305 // Stores whether, during humongous object registration, we found candidate regions.
306 // If not, we can skip a few steps.
307 bool _has_humongous_reclaim_candidates;
308
309 volatile unsigned _gc_time_stamp;
310
311 size_t* _surviving_young_words;
312
313 G1HRPrinter _hr_printer;
314
315 void setup_surviving_young_words();
316 void update_surviving_young_words(size_t* surv_young_words);
317 void cleanup_surviving_young_words();
318
319 // It decides whether an explicit GC should start a concurrent cycle
320 // instead of doing a STW GC. Currently, a concurrent cycle is
321 // explicitly started if:
322 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
323 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
324 // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
325 // (d) cause == _g1_humongous_allocation
326 bool should_do_concurrent_full_gc(GCCause::Cause cause);
327
328 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
329 // concurrent cycles) we have started.
330 volatile uint _old_marking_cycles_started;
331
332 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
333 // concurrent cycles) we have completed.
334 volatile uint _old_marking_cycles_completed;
335
336 bool _concurrent_cycle_started;
337 bool _heap_summary_sent;
338
339 // This is a non-product method that is helpful for testing. It is
340 // called at the end of a GC and artificially expands the heap by
341 // allocating a number of dead regions. This way we can induce very
342 // frequent marking cycles and stress the cleanup / concurrent
343 // cleanup code more (as all the regions that will be allocated by
344 // this method will be found dead by the marking cycle).
345 void allocate_dummy_regions() PRODUCT_RETURN;
|