// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectionSet;
class G1CollectorPolicy;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
class G1YoungRemSetSamplingThread;
class HeapRegionRemSetIterator;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1FullGCScope;
class G1HeapVerifier;
class G1HeapSizingPolicy;
class G1HeapSummary;
class G1EvacSummary;
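
// Task queue used by the parallel evacuation workers; each StarTask entry
// is an oop* or narrowOop* that still has to be scanned.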
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParScanThreadStateSet;
  friend class G1ParTask;
  friend class G1PLABAllocator;
  friend class G1PrepareCompactClosure;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckCSetFastTableClosure;

private:
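  // Thread that periodically samples the remembered-set lengths of young
  // regions so the policy can react to remembered-set growth between pauses.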
  G1YoungRemSetSamplingThread* _young_gen_sampling_thread;

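  // The work gang used for parallel GC work, and the policy object supplied
  // at construction time.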
  WorkGang* _workers;
  G1CollectorPolicy* _collector_policy;

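  // Objects of at least this size (in words) are treated as humongous and
  // allocated in dedicated contiguous regions rather than copied during
  // evacuation. In G1 this threshold is half a heap region; e.g. with 1 MB
  // regions, objects of 512 KB or more are humongous.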
  static size_t _humongous_object_threshold_in_words;

  // The secondary free list, which contains regions that have been
  // freed up during the cleanup process. It is appended to the master
  // free list when appropriate.
  FreeRegionList _secondary_free_list;

  // Keeps track of the old regions.
  HeapRegionSet _old_set;

  // Keeps track of the humongous regions.
  HeapRegionSet _humongous_set;

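  // Attempts to reclaim humongous regions whose objects are found to be
  // unreachable at the start of an evacuation pause, without waiting for a
  // full concurrent marking cycle.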
  void eagerly_reclaim_humongous_regions();

  // Start a new incremental collection set for the next pause.
  void start_new_collection_set();

  // Attempt to expand the heap sufficiently
  // to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the address of the
  // allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);

  // Preserve any referents discovered by concurrent marking that have not yet been
  // copied by the STW pause.
  void preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states);

  // Process any reference objects discovered during
  // an incremental evacuation pause.
  void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);

  // Enqueue any remaining discovered references
  // after processing.
  void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);

  // Merges the information gathered on a per-thread basis for all worker
  // threads during GC into global variables.
  void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);

public:
  G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }

  WorkGang* workers() const { return _workers; }

  G1Allocator* allocator() {
    return _allocator;
  }

  G1HeapVerifier* verifier() {
    return _verifier;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes,
  // not words). Returns true if the heap was expanded by the requested
  // amount, false otherwise. The request is rounded up to a HeapRegion
  // boundary.
  bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
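  // Liveness closure handed to the reference processor used by concurrent
  // marking.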
  G1CMIsAliveClosure _is_alive_closure_cm;

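  // Set while the cleanup phase of concurrent marking is still handing
  // freed regions back to the free lists; cleared once they have all
  // arrived.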
  volatile bool _free_regions_coming;

public:

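  // Per-worker scan queues. A typical (illustrative) traversal:
  //   for (uint i = 0; i < num_task_queues(); i++) {
  //     RefToScanQueue* q = task_queue(i);
  //     ...
  //   }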
  RefToScanQueue* task_queue(uint i) const;

  uint num_task_queues() const;

  // A set of cards where updates happened during the GC.
  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

  // Create a G1CollectedHeap with the specified policy.
  // The caller must invoke initialize() afterwards.
  // If something goes wrong, the constructor may not return (e.g. the VM
  // may exit during initialization).
  G1CollectedHeap(G1CollectorPolicy* policy);

private:
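  // Helpers for initialize(); each returns JNI_OK on success.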
  jint initialize_concurrent_refinement();
  jint initialize_young_gen_sampling_thread();

public:
  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

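  // Stop all concurrent GC threads during VM shutdown.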
  virtual void stop();

  // Called at the beginning and end of a safepoint to synchronize and
  // desynchronize suspendible concurrent threads with the operation.
  virtual void safepoint_synchronize_begin();
  virtual void safepoint_synchronize_end();

  // Return the (conservative) maximum heap alignment for any G1 heap.
  static size_t conservative_max_heap_alignment();

  // Perform any operations required after initialization.
  void post_initialize();

  // Initialize weak reference processing.
  void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::G1CollectedHeap;
  }