59 class G1ParScanThreadState;
60 class ObjectClosure;
61 class SpaceClosure;
62 class CompactibleSpaceClosure;
63 class Space;
64 class G1CollectorPolicy;
65 class GenRemSet;
66 class G1RemSet;
67 class HeapRegionRemSetIterator;
68 class ConcurrentMark;
69 class ConcurrentMarkThread;
70 class ConcurrentG1Refine;
71 class ConcurrentGCTimer;
72 class GenerationCounters;
73 class STWGCTimer;
74 class G1NewTracer;
75 class G1OldTracer;
76 class EvacuationFailedInfo;
77 class nmethod;
78 class Ticks;
79
// Per-worker task queue holding StarTask entries (locations of references
// still to be scanned); the "overflow" variant spills to an unbounded
// backing store when the fixed-capacity queue fills.
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
// The set of all workers' scan queues, enabling task stealing between them.
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

// Index types for regions and for cards within a region. Plain ints are
// sufficient for the documented ranges below.
typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;   // needs to hold [ 0..CardsPerRegion )
85
86 class YoungList : public CHeapObj<mtGC> {
87 private:
88 G1CollectedHeap* _g1h;
89
90 HeapRegion* _head;
91
92 HeapRegion* _survivor_head;
93 HeapRegion* _survivor_tail;
94
95 HeapRegion* _curr;
96
97 uint _length;
98 uint _survivor_length;
187 friend class SurvivorGCAllocRegion;
188 friend class OldGCAllocRegion;
189 friend class G1Allocator;
190
191 // Closures used in implementation.
192 friend class G1ParScanThreadState;
193 friend class G1ParTask;
194 friend class G1ParGCAllocator;
195 friend class G1PrepareCompactClosure;
196
197 // Other related classes.
198 friend class HeapRegionClaimer;
199
200 // Testing classes.
201 friend class G1CheckCSetFastTableClosure;
202
203 private:
204 // The one and only G1CollectedHeap, so static functions can find it.
205 static G1CollectedHeap* _g1h;
206
207 static size_t _humongous_object_threshold_in_words;
208
209 // The secondary free list which contains regions that have been
210 // freed up during the cleanup process. This will be appended to
211 // the master free list when appropriate.
212 FreeRegionList _secondary_free_list;
213
214 // It keeps track of the old regions.
215 HeapRegionSet _old_set;
216
217 // It keeps track of the humongous regions.
218 HeapRegionSet _humongous_set;
219
220 void clear_humongous_is_live_table();
221 void eagerly_reclaim_humongous_regions();
222
223 // The number of regions we could create by expansion.
224 uint _expansion_regions;
225
226 // The block offset table for the G1 heap.
588 // failed allocation request (including collection, expansion, etc.)
589 HeapWord* satisfy_failed_allocation(size_t word_size,
590 AllocationContext_t context,
591 bool* succeeded);
592
593 // Attempting to expand the heap sufficiently
594 // to support an allocation of the given "word_size". If
595 // successful, perform the allocation and return the address of the
596 // allocated block, or else "NULL".
597 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
598
599 // Process any reference objects discovered during
600 // an incremental evacuation pause.
601 void process_discovered_references(uint no_of_gc_workers);
602
603 // Enqueue any remaining discovered references
604 // after processing.
605 void enqueue_discovered_references(uint no_of_gc_workers);
606
607 public:
608
609 G1Allocator* allocator() {
610 return _allocator;
611 }
612
613 G1MonitoringSupport* g1mm() {
614 assert(_g1mm != NULL, "should have been initialized");
615 return _g1mm;
616 }
617
618 // Expand the garbage-first heap by at least the given size (in bytes!).
619 // Returns true if the heap was expanded by the requested amount;
620 // false otherwise.
621 // (Rounds up to a HeapRegion boundary.)
622 bool expand(size_t expand_bytes);
623
624 // Returns the PLAB statistics for a given destination.
625 inline PLABStats* alloc_buffer_stats(InCSetState dest);
626
627 // Determines PLAB size for a given destination.
|
59 class G1ParScanThreadState;
60 class ObjectClosure;
61 class SpaceClosure;
62 class CompactibleSpaceClosure;
63 class Space;
64 class G1CollectorPolicy;
65 class GenRemSet;
66 class G1RemSet;
67 class HeapRegionRemSetIterator;
68 class ConcurrentMark;
69 class ConcurrentMarkThread;
70 class ConcurrentG1Refine;
71 class ConcurrentGCTimer;
72 class GenerationCounters;
73 class STWGCTimer;
74 class G1NewTracer;
75 class G1OldTracer;
76 class EvacuationFailedInfo;
77 class nmethod;
78 class Ticks;
79 class FlexibleWorkGang;
80
// Per-worker task queue holding StarTask entries (locations of references
// still to be scanned); the "overflow" variant spills to an unbounded
// backing store when the fixed-capacity queue fills.
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
// The set of all workers' scan queues, enabling task stealing between them.
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

// Index types for regions and for cards within a region. Plain ints are
// sufficient for the documented ranges below.
typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;   // needs to hold [ 0..CardsPerRegion )
86
87 class YoungList : public CHeapObj<mtGC> {
88 private:
89 G1CollectedHeap* _g1h;
90
91 HeapRegion* _head;
92
93 HeapRegion* _survivor_head;
94 HeapRegion* _survivor_tail;
95
96 HeapRegion* _curr;
97
98 uint _length;
99 uint _survivor_length;
188 friend class SurvivorGCAllocRegion;
189 friend class OldGCAllocRegion;
190 friend class G1Allocator;
191
192 // Closures used in implementation.
193 friend class G1ParScanThreadState;
194 friend class G1ParTask;
195 friend class G1ParGCAllocator;
196 friend class G1PrepareCompactClosure;
197
198 // Other related classes.
199 friend class HeapRegionClaimer;
200
201 // Testing classes.
202 friend class G1CheckCSetFastTableClosure;
203
204 private:
205 // The one and only G1CollectedHeap, so static functions can find it.
206 static G1CollectedHeap* _g1h;
207
208 FlexibleWorkGang* _workers;
209
210 static size_t _humongous_object_threshold_in_words;
211
212 // The secondary free list which contains regions that have been
213 // freed up during the cleanup process. This will be appended to
214 // the master free list when appropriate.
215 FreeRegionList _secondary_free_list;
216
217 // It keeps track of the old regions.
218 HeapRegionSet _old_set;
219
220 // It keeps track of the humongous regions.
221 HeapRegionSet _humongous_set;
222
223 void clear_humongous_is_live_table();
224 void eagerly_reclaim_humongous_regions();
225
226 // The number of regions we could create by expansion.
227 uint _expansion_regions;
228
229 // The block offset table for the G1 heap.
591 // failed allocation request (including collection, expansion, etc.)
592 HeapWord* satisfy_failed_allocation(size_t word_size,
593 AllocationContext_t context,
594 bool* succeeded);
595
596 // Attempting to expand the heap sufficiently
597 // to support an allocation of the given "word_size". If
598 // successful, perform the allocation and return the address of the
599 // allocated block, or else "NULL".
600 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
601
602 // Process any reference objects discovered during
603 // an incremental evacuation pause.
604 void process_discovered_references(uint no_of_gc_workers);
605
606 // Enqueue any remaining discovered references
607 // after processing.
608 void enqueue_discovered_references(uint no_of_gc_workers);
609
610 public:
611 FlexibleWorkGang* workers() const { return _workers; }
612
613 G1Allocator* allocator() {
614 return _allocator;
615 }
616
617 G1MonitoringSupport* g1mm() {
618 assert(_g1mm != NULL, "should have been initialized");
619 return _g1mm;
620 }
621
622 // Expand the garbage-first heap by at least the given size (in bytes!).
623 // Returns true if the heap was expanded by the requested amount;
624 // false otherwise.
625 // (Rounds up to a HeapRegion boundary.)
626 bool expand(size_t expand_bytes);
627
628 // Returns the PLAB statistics for a given destination.
629 inline PLABStats* alloc_buffer_stats(InCSetState dest);
630
631 // Determines PLAB size for a given destination.
|