 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
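
// Illustrative only, not part of the original header: a hedged sketch of how
// the on_commit() callback above might forward newly committed regions to
// reset_from_card_cache(). The enclosing class is assumed to be
// G1RegionMappingChangedListener (the type of the _listener member below);
// the real definition lives in the .cpp file.
void G1RegionMappingChangedListener::on_commit(uint start_idx,
                                               size_t num_regions,
                                               bool zero_filled) {
  // The from-card cache is not the committed memory itself, so the
  // zero_filled hint gives us nothing here; just reset the cache entries
  // covering the newly committed regions.
  reset_from_card_cache(start_idx, num_regions);
}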

class G1CollectedHeap : public SharedHeap {
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class SurvivorGCAllocRegion;
  friend class OldGCAllocRegion;
  friend class G1Allocator;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParTask;
  friend class G1PLABAllocator;
  friend class G1PrepareCompactClosure;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckCSetFastTableClosure;

 private:
  // The one and only G1CollectedHeap, so static functions can find it.
  static G1CollectedHeap* _g1h;

  static size_t _humongous_object_threshold_in_words;
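
  // Illustrative only, not part of the original header: a hedged sketch of
  // the check the threshold above supports. G1 treats an allocation as
  // humongous once its word size reaches the threshold (half a region's
  // words); the helper name is hypothetical.
  static bool is_humongous_sketch(size_t word_size) {
    return word_size >= _humongous_object_threshold_in_words;
  }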

  // The secondary free list which contains regions that have been
  // freed up during the cleanup process. This will be appended to
  // the master free list when appropriate.
  FreeRegionList _secondary_free_list;

  // It keeps track of the old regions.
  HeapRegionSet _old_set;

  // ...

  // Tears down the region sets / lists so that they are empty and the
  // regions on the heap do not belong to a region set / list. The
  // only exception is the humongous set which we leave unaltered. If
  // free_list_only is true, it will only tear down the master free
  // list. It is called before a Full GC (free_list_only == false) or
  // before heap shrinking (free_list_only == true).
  void tear_down_region_sets(bool free_list_only);

  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the master free
  // list. It is called after a Full GC (free_list_only == false) or
  // after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);
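
  // Illustrative only, not part of the original header: a hedged sketch of
  // how a caller is expected to bracket a Full GC with the pair above. The
  // real caller lives in g1CollectedHeap.cpp; the helper name is
  // hypothetical. Heap shrinking would use the same bracket with
  // free_list_only == true.
  void full_gc_region_set_bracket_sketch() {
    tear_down_region_sets(false /* free_list_only */); // empty the sets
    // ... the compacting collection itself would run here ...
    rebuild_region_sets(false /* free_list_only */);   // repopulate from the heap
  }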

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // The sequence of all heap regions in the heap.
  HeapRegionManager _hrm;

  // Handles the different kinds of allocations within a region.
  G1Allocator* _allocator;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region(s).
  size_t _summary_bytes_used;

  // Statistics for each allocation context.
  AllocationContextStats _allocation_context_stats;

  // PLAB sizing policy for survivors.
  PLABStats _survivor_plab_stats;

  // PLAB sizing policy for tenured objects.
  PLABStats _old_plab_stats;

  // It specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false so that we don't re-attempt the heap expansion (it's likely
  // that subsequent expansion attempts will also fail if one fails).
  // Currently, it is only consulted during GC and it's reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Records whether the region at the given index is kept live by roots or
  // references from the young generation.
  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
   protected:
    bool default_value() const { return false; }
   public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_live(uint region) {
      set_by_index(region, true);
    }
    bool is_live(uint region) {
      return get_by_index(region);
    }
  };

  HumongousIsLiveBiasedMappedArray _humongous_is_live;
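
  // Illustrative only, not part of the original header: a hedged sketch of
  // recording liveness for a humongous object found during root scanning.
  // addr_to_region() is assumed to be declared elsewhere in this class;
  // the helper name is hypothetical.
  void set_humongous_is_live_sketch(oop obj) {
    uint region = addr_to_region((HeapWord*) obj); // region holding obj's start
    if (!_humongous_is_live.is_live(region)) {     // avoid redundant stores
      _humongous_is_live.set_live(region);
    }
  }
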
  // Stores whether, during humongous object registration, we found any
  // candidate regions.
  bool _has_humongous_reclaim_candidates;

  // ...

  // Called at the end of either a concurrent cycle or a Full GC to update
  // the number of old marking cycles completed. The two can nest: a Full GC
  // can start and end in the middle of a concurrent cycle. If concurrent is
  // false, the caller is the inner caller in this nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);

  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }
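
  // Illustrative only, not part of the original header: a hedged sketch of
  // the waiting side of the FullGCCount_lock notification described above,
  // assuming HotSpot's MutexLockerEx and FullGCCount_lock are in scope; the
  // helper name is hypothetical.
  void wait_for_old_marking_cycle_sketch(uint completed_before) {
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    while (old_marking_cycles_completed() <= completed_before) {
      // increment_old_marking_cycles_completed() notifies this lock.
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }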

  void register_concurrent_cycle_start(const Ticks& start_time);
  void register_concurrent_cycle_end();
  void trace_heap_after_concurrent_cycle();

  G1YCType yc_type();

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Allocates a new heap region instance.
  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  // The locked parameter indicates if the caller has already taken
  // care of proper synchronization. This may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool par,
                   bool locked = false);
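
  // Illustrative only, not part of the original header: a hedged sketch of
  // the local-list pattern described above. prepend_to_freelist() is assumed
  // to be declared elsewhere in this class; the helper name is hypothetical.
  void free_region_to_master_list_sketch(HeapRegion* hr) {
    FreeRegionList local_list("Local Free List Sketch");
    free_region(hr, &local_list, false /* par */);
    // Hand the accumulated regions back to the master free list in one go.
    prepend_to_freelist(&local_list);
  }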

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later). If par is true, the regions' RSets will not be freed
  // up. The assumption is that this will be done later.
  void free_humongous_region(HeapRegion* hr,
                             FreeRegionList* free_list,
                             bool par);

  // ...

  // The shared block offset table array.
  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

  // Reference Processing accessors.

  // The STW reference processor.
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  // The Concurrent Marking reference processor.
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  void increase_used(size_t bytes) { _summary_bytes_used += bytes; }
  void set_used(size_t bytes) { _summary_bytes_used = bytes; }

  void decrease_used(size_t bytes) {
    assert(_summary_bytes_used >= bytes,
           err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
                   _summary_bytes_used, bytes));
    _summary_bytes_used -= bytes;
  }
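
  // Illustrative only, not part of the original header: a hedged,
  // hypothetical example of the accounting the three mutators above support,
  // e.g. folding a fully retired region's bytes into the running total.
  void account_retired_region_sketch(HeapRegion* hr) {
    increase_used(hr->used()); // hr->used() assumed from HeapRegion
  }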

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm.available() == 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm.length(); }

  // The max number of regions in the heap.
  uint max_regions() const { return _hrm.max_length(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrm.num_free_regions(); }
