// candidates removed from the set as they are found reachable from
// roots or the young generation.
//
// One flag per heap region, biased-mapped so it can be indexed directly
// by region index; true means the region is still a reclaim candidate.
class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 protected:
  // Regions start out as non-candidates until explicitly marked.
  bool default_value() const { return false; }
 public:
  // Drop all candidates, e.g. when starting a new registration round.
  void clear() { G1BiasedMappedArray<bool>::clear(); }
  // Mark or unmark the region with the given index as a reclaim candidate.
  void set_candidate(uint region, bool value) {
    set_by_index(region, value);
  }
  // Whether the region with the given index is currently a candidate.
  bool is_candidate(uint region) {
    return get_by_index(region);
  }
};
240
241 HumongousReclaimCandidates _humongous_reclaim_candidates;
242 // Stores whether during humongous object registration we found candidate regions.
243 // If not, we can skip a few steps.
244 bool _has_humongous_reclaim_candidates;
245
246 volatile unsigned _gc_time_stamp;
247
248 G1HRPrinter _hr_printer;
249
250 // It decides whether an explicit GC should start a concurrent cycle
251 // instead of doing a STW GC. Currently, a concurrent cycle is
252 // explicitly started if:
253 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
254 // (b) cause == _g1_humongous_allocation
255 // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
256 // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
257 // (e) cause == _update_allocation_context_stats_inc
258 // (f) cause == _wb_conc_mark
259 bool should_do_concurrent_full_gc(GCCause::Cause cause);
260
261 // indicates whether we are in young or mixed GC mode
262 G1CollectorState _collector_state;
263
264 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
265 // concurrent cycles) we have started.
266 volatile uint _old_marking_cycles_started;
// Const and non-const accessors for the young/mixed collector state.
const G1CollectorState* collector_state() const { return &_collector_state; }
G1CollectorState* collector_state() { return &_collector_state; }

// The current policy object for the collector.
G1CollectorPolicy* g1_policy() const { return _g1_policy; }

// Const and non-const accessors for the current collection set.
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }

virtual CollectorPolicy* collector_policy() const;

// Adaptive size policy. No such thing for g1.
virtual AdaptiveSizePolicy* size_policy() { return NULL; }

// The rem set and barrier set.
G1RemSet* g1_rem_set() const { return _g1_rem_set; }

// Try to minimize the remembered set.
void scrub_rem_set();
997
998 unsigned get_gc_time_stamp() {
999 return _gc_time_stamp;
1000 }
1001
// Reset the heap-global GC timestamp (defined in the .inline.hpp file).
inline void reset_gc_time_stamp();

// Debug-only verification of per-region timestamps; no-op in product builds.
void check_gc_time_stamps() PRODUCT_RETURN;

// Bump the heap-global GC timestamp (defined in the .inline.hpp file).
inline void increment_gc_time_stamp();

// Reset the given region's GC timestamp. If it is a "starts humongous"
// region, also reset the GC timestamps of its corresponding
// "continues humongous" regions.
void reset_gc_time_stamps(HeapRegion* hr);

// Apply the given closure on all cards in the Hot Card Cache, emptying it.
void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);

// Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
1018
|
// candidates removed from the set as they are found reachable from
// roots or the young generation.
//
// One flag per heap region, biased-mapped so it can be indexed directly
// by region index; true means the region is still a reclaim candidate.
class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 protected:
  // Regions start out as non-candidates until explicitly marked.
  bool default_value() const { return false; }
 public:
  // Drop all candidates, e.g. when starting a new registration round.
  void clear() { G1BiasedMappedArray<bool>::clear(); }
  // Mark or unmark the region with the given index as a reclaim candidate.
  void set_candidate(uint region, bool value) {
    set_by_index(region, value);
  }
  // Whether the region with the given index is currently a candidate.
  bool is_candidate(uint region) {
    return get_by_index(region);
  }
};
240
HumongousReclaimCandidates _humongous_reclaim_candidates;
// Stores whether during humongous object registration we found candidate regions.
// If not, we can skip a few steps.
bool _has_humongous_reclaim_candidates;

// Heap-global timestamp; compared against per-region timestamps to detect
// stale region state.
volatile uint _gc_time_stamp;

G1HRPrinter _hr_printer;

// It decides whether an explicit GC should start a concurrent cycle
// instead of doing a STW GC. Currently, a concurrent cycle is
// explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
// (b) cause == _g1_humongous_allocation
// (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
// (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
// (e) cause == _update_allocation_context_stats_inc
// (f) cause == _wb_conc_mark
bool should_do_concurrent_full_gc(GCCause::Cause cause);

// indicates whether we are in young or mixed GC mode
G1CollectorState _collector_state;

// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
volatile uint _old_marking_cycles_started;
// Const and non-const accessors for the young/mixed collector state.
const G1CollectorState* collector_state() const { return &_collector_state; }
G1CollectorState* collector_state() { return &_collector_state; }

// The current policy object for the collector.
G1CollectorPolicy* g1_policy() const { return _g1_policy; }

// Const and non-const accessors for the current collection set.
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }

virtual CollectorPolicy* collector_policy() const;

// Adaptive size policy. No such thing for g1.
virtual AdaptiveSizePolicy* size_policy() { return NULL; }

// The rem set and barrier set.
G1RemSet* g1_rem_set() const { return _g1_rem_set; }

// Try to minimize the remembered set.
void scrub_rem_set();
997
// Returns the current heap-global GC timestamp.
uint get_gc_time_stamp() {
  return _gc_time_stamp;
}
1001
// Reset the heap-global GC timestamp (defined in the .inline.hpp file).
inline void reset_gc_time_stamp();

// Debug-only verification of per-region timestamps; no-op in product builds.
void check_gc_time_stamps() PRODUCT_RETURN;

// Bump the heap-global GC timestamp (defined in the .inline.hpp file).
inline void increment_gc_time_stamp();

// Reset the given region's GC timestamp. If it is a "starts humongous"
// region, also reset the GC timestamps of its corresponding
// "continues humongous" regions.
void reset_gc_time_stamps(HeapRegion* hr);

// Apply the given closure on all cards in the Hot Card Cache, emptying it.
void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);

// Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
1018
|