// Forward declarations for collaborator types of the G1 heap. Only
// pointers/references to these appear in this header, so incomplete
// types suffice here.
58 class G1ParScanThreadState;
59 class ObjectClosure;
60 class SpaceClosure;
61 class CompactibleSpaceClosure;
62 class Space;
63 class G1CollectorPolicy;
64 class GenRemSet;
65 class G1RemSet;
66 class HeapRegionRemSetIterator;
67 class ConcurrentMark;
68 class ConcurrentMarkThread;
69 class ConcurrentG1Refine;
70 class ConcurrentGCTimer;
71 class GenerationCounters;
72 class STWGCTimer;
73 class G1NewTracer;
74 class G1OldTracer;
75 class EvacuationFailedInfo;
76 class nmethod;
77 class Ticks;
// NOTE(review): a second copy of this fragment later in this file forward-
// declares `WorkGang` here instead of `FlexibleWorkGang` (and uses it for
// `_workers`/`workers()`). Confirm which work-gang type is current and
// remove the stale copy.
78 class FlexibleWorkGang;
79
// Per-thread work queue of oop*/narrowOop* entries (StarTask) scanned during
// evacuation, plus the set type that groups one queue per GC worker.
80 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
81 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
82
83 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
84 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
85
// YoungList: bookkeeping for the set of young-generation heap regions.
// NOTE(review): the class body is truncated in this fragment (original
// lines 99-183 are absent); accessors and the closing brace are not
// visible here.
86 class YoungList : public CHeapObj<mtGC> {
87 private:
88 G1CollectedHeap* _g1h;   // back-pointer to the owning heap
89
// Head of the region list — presumably the eden/young regions; confirm
// against the (elided) accessors.
90 HeapRegion* _head;
91
// Survivor regions are kept as a separate sub-list with both ends tracked.
92 HeapRegion* _survivor_head;
93 HeapRegion* _survivor_tail;
94
// Iteration cursor — likely used by the (elided) first/next traversal
// methods; TODO confirm.
95 HeapRegion* _curr;
96
// Region counts for the whole list and for the survivor sub-list.
97 uint _length;
98 uint _survivor_length;
// NOTE(review): the lines below are the interior of a larger class (its
// header, original lines 99-183, is not in this fragment) — presumably
// G1CollectedHeap, given the members. The friends below get access to the
// private allocation/collection machinery.
184 friend class VM_G1IncCollectionPause;
185 friend class VMStructs;
186 friend class MutatorAllocRegion;
187 friend class SurvivorGCAllocRegion;
188 friend class OldGCAllocRegion;
189 friend class G1Allocator;
190
191 // Closures used in implementation.
192 friend class G1ParScanThreadState;
193 friend class G1ParTask;
194 friend class G1ParGCAllocator;
195 friend class G1PrepareCompactClosure;
196
197 // Other related classes.
198 friend class HeapRegionClaimer;
199
200 // Testing classes.
201 friend class G1CheckCSetFastTableClosure;
202
203 private:
// Worker threads used for parallel GC phases (exposed via workers() below).
204 FlexibleWorkGang* _workers;
205
// Objects at or above this size (in words) are treated as humongous and
// allocated in dedicated regions.
206 static size_t _humongous_object_threshold_in_words;
207
208 // The secondary free list which contains regions that have been
209 // freed up during the cleanup process. This will be appended to
210 // the master free list when appropriate.
211 FreeRegionList _secondary_free_list;
212
213 // It keeps track of the old regions.
214 HeapRegionSet _old_set;
215
216 // It keeps track of the humongous regions.
217 HeapRegionSet _humongous_set;
218
219 void eagerly_reclaim_humongous_regions();
220
221 // The number of regions we could create by expansion.
222 uint _expansion_regions;
223
224 // The block offset table for the G1 heap.
// NOTE(review): original lines 225-595 are missing from this fragment, so
// the comment above has lost the declaration it documented, and the comment
// below begins mid-sentence (its first half is on the elided lines).
596 // failed allocation request (including collection, expansion, etc.)
// *succeeded is an out-parameter reporting whether the request was satisfied.
597 HeapWord* satisfy_failed_allocation(size_t word_size,
598 AllocationContext_t context,
599 bool* succeeded);
600
601 // Attempting to expand the heap sufficiently
602 // to support an allocation of the given "word_size". If
603 // successful, perform the allocation and return the address of the
604 // allocated block, or else "NULL".
605 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
606
607 // Process any reference objects discovered during
608 // an incremental evacuation pause.
609 void process_discovered_references();
610
611 // Enqueue any remaining discovered references
612 // after processing.
613 void enqueue_discovered_references();
614
615 public:
// Accessor for the parallel GC worker gang (may be queried by GC phases).
616 FlexibleWorkGang* workers() const { return _workers; }
617
// Accessor for the heap's allocator object (declared on elided lines).
618 G1Allocator* allocator() {
619 return _allocator;
620 }
621
// Monitoring support; asserts that initialization has already happened,
// so callers must not use this before the heap is fully set up.
622 G1MonitoringSupport* g1mm() {
623 assert(_g1mm != NULL, "should have been initialized");
624 return _g1mm;
625 }
626
627 // Expand the garbage-first heap by at least the given size (in bytes!).
628 // Returns true if the heap was expanded by the requested amount;
629 // false otherwise.
630 // (Rounds up to a HeapRegion boundary.)
631 bool expand(size_t expand_bytes);
632
633 // Returns the PLAB statistics for a given destination.
634 inline PLABStats* alloc_buffer_stats(InCSetState dest);
635
636 // Determines PLAB size for a given destination.
|
// NOTE(review): this is a second copy of the fragment that opens this file;
// it differs only in declaring/using `WorkGang` instead of
// `FlexibleWorkGang`. The two copies should be reconciled to one.
// Forward declarations for collaborator types of the G1 heap; only
// pointers/references to these appear in this header.
58 class G1ParScanThreadState;
59 class ObjectClosure;
60 class SpaceClosure;
61 class CompactibleSpaceClosure;
62 class Space;
63 class G1CollectorPolicy;
64 class GenRemSet;
65 class G1RemSet;
66 class HeapRegionRemSetIterator;
67 class ConcurrentMark;
68 class ConcurrentMarkThread;
69 class ConcurrentG1Refine;
70 class ConcurrentGCTimer;
71 class GenerationCounters;
72 class STWGCTimer;
73 class G1NewTracer;
74 class G1OldTracer;
75 class EvacuationFailedInfo;
76 class nmethod;
77 class Ticks;
78 class WorkGang;
79
// Per-thread work queue of oop*/narrowOop* entries (StarTask) scanned during
// evacuation, plus the set type that groups one queue per GC worker.
80 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
81 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
82
83 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
84 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
85
// YoungList: bookkeeping for the set of young-generation heap regions.
// NOTE(review): the class body is truncated in this fragment (original
// lines 99-183 are absent); accessors and the closing brace are not
// visible here.
86 class YoungList : public CHeapObj<mtGC> {
87 private:
88 G1CollectedHeap* _g1h;   // back-pointer to the owning heap
89
// Head of the region list — presumably the eden/young regions; confirm
// against the (elided) accessors.
90 HeapRegion* _head;
91
// Survivor regions are kept as a separate sub-list with both ends tracked.
92 HeapRegion* _survivor_head;
93 HeapRegion* _survivor_tail;
94
// Iteration cursor — likely used by the (elided) first/next traversal
// methods; TODO confirm.
95 HeapRegion* _curr;
96
// Region counts for the whole list and for the survivor sub-list.
97 uint _length;
98 uint _survivor_length;
// NOTE(review): the lines below are the interior of a larger class (its
// header, original lines 99-183, is not in this fragment) — presumably
// G1CollectedHeap, given the members. The friends below get access to the
// private allocation/collection machinery.
184 friend class VM_G1IncCollectionPause;
185 friend class VMStructs;
186 friend class MutatorAllocRegion;
187 friend class SurvivorGCAllocRegion;
188 friend class OldGCAllocRegion;
189 friend class G1Allocator;
190
191 // Closures used in implementation.
192 friend class G1ParScanThreadState;
193 friend class G1ParTask;
194 friend class G1ParGCAllocator;
195 friend class G1PrepareCompactClosure;
196
197 // Other related classes.
198 friend class HeapRegionClaimer;
199
200 // Testing classes.
201 friend class G1CheckCSetFastTableClosure;
202
203 private:
// Worker threads used for parallel GC phases (exposed via workers() below).
204 WorkGang* _workers;
205
// Objects at or above this size (in words) are treated as humongous and
// allocated in dedicated regions.
206 static size_t _humongous_object_threshold_in_words;
207
208 // The secondary free list which contains regions that have been
209 // freed up during the cleanup process. This will be appended to
210 // the master free list when appropriate.
211 FreeRegionList _secondary_free_list;
212
213 // It keeps track of the old regions.
214 HeapRegionSet _old_set;
215
216 // It keeps track of the humongous regions.
217 HeapRegionSet _humongous_set;
218
219 void eagerly_reclaim_humongous_regions();
220
221 // The number of regions we could create by expansion.
222 uint _expansion_regions;
223
224 // The block offset table for the G1 heap.
// NOTE(review): original lines 225-595 are missing from this fragment, so
// the comment above has lost the declaration it documented, and the comment
// below begins mid-sentence (its first half is on the elided lines).
596 // failed allocation request (including collection, expansion, etc.)
// *succeeded is an out-parameter reporting whether the request was satisfied.
597 HeapWord* satisfy_failed_allocation(size_t word_size,
598 AllocationContext_t context,
599 bool* succeeded);
600
601 // Attempting to expand the heap sufficiently
602 // to support an allocation of the given "word_size". If
603 // successful, perform the allocation and return the address of the
604 // allocated block, or else "NULL".
605 HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
606
607 // Process any reference objects discovered during
608 // an incremental evacuation pause.
609 void process_discovered_references();
610
611 // Enqueue any remaining discovered references
612 // after processing.
613 void enqueue_discovered_references();
614
615 public:
// Accessor for the parallel GC worker gang (may be queried by GC phases).
616 WorkGang* workers() const { return _workers; }
617
// Accessor for the heap's allocator object (declared on elided lines).
618 G1Allocator* allocator() {
619 return _allocator;
620 }
621
// Monitoring support; asserts that initialization has already happened,
// so callers must not use this before the heap is fully set up.
622 G1MonitoringSupport* g1mm() {
623 assert(_g1mm != NULL, "should have been initialized");
624 return _g1mm;
625 }
626
627 // Expand the garbage-first heap by at least the given size (in bytes!).
628 // Returns true if the heap was expanded by the requested amount;
629 // false otherwise.
630 // (Rounds up to a HeapRegion boundary.)
631 bool expand(size_t expand_bytes);
632
633 // Returns the PLAB statistics for a given destination.
634 inline PLABStats* alloc_buffer_stats(InCSetState dest);
635
636 // Determines PLAB size for a given destination.
|