// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class G1ParScanThreadState;
class MemoryPool;
class MemoryManager;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CardTableEntryClosure;
class G1CollectionSet;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
class G1ServiceThread;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class EvacuationFailedInfo;
class nmethod;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1FullGCScope;
class G1HeapVerifier;
class G1HeapSizingPolicy;
class G1HeapSummary;
class G1EvacSummary;

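// Task queue types used by the GC worker threads during evacuation. Each
// worker owns one G1ScannerTasksQueue; the queue set groups them so that an
// idle worker can steal work from the others. (Descriptive note added for
// context; the stealing protocol itself lives in the task queue classes, not
// in this file.)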
typedef OverflowTaskQueue<ScannerTask, mtGC> G1ScannerTasksQueue;
typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;

// ...

  friend class VM_G1TryInitiateConcMark;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1FullCollector;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParScanThreadStateSet;
  friend class G1EvacuateRegionsTask;
  friend class G1PLABAllocator;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckRegionAttrTableClosure;

private:
  // Background thread for periodic service work (e.g. remembered set sampling).
  G1ServiceThread* _service_thread;

  // Workers used for parallel GC work.
  WorkGang* _workers;
  // The G1 card table.
  G1CardTable* _card_table;

  // Time at which the most recent collection pause ended.
  Ticks _collection_pause_end;

  // Policy for clearing soft references.
  SoftRefPolicy _soft_ref_policy;

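  // Any allocation at or above this size (in words) is treated as humongous
  // and placed in dedicated regions instead of the young generation.
  // (Illustrative note: G1 classifies an object as humongous once it reaches
  // half a heap region, so with 4 MB regions an allocation of 2 MB or more
  // would take the humongous path.)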
  static size_t _humongous_object_threshold_in_words;

  // These sets keep track of old, archive and humongous regions respectively.
  HeapRegionSet _old_set;
  HeapRegionSet _archive_set;
  HeapRegionSet _humongous_set;

  void eagerly_reclaim_humongous_regions();
  // Start a new incremental collection set for the next pause.
  void start_new_collection_set();

  // The block offset table for the G1 heap.
  // ...
  // Attempt to expand the heap sufficiently to support an allocation of the
  // given "word_size". If successful, perform the allocation and return the
  // address of the allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size);
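  // (Note: expansion happens at heap-region granularity, so the heap grows by
  // whole regions even for smaller requests; this path is typically taken when
  // a request cannot be satisfied from the currently committed regions.)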

  // Process any reference objects discovered.
  void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);

  // If, during a concurrent start pause, we install a pending list head that is
  // not otherwise reachable, ensure that it is marked in the bitmap so that
  // concurrent marking can discover it.
  void make_pending_list_reachable();

  // Merges the information gathered on a per-thread basis for all worker threads
  // during GC into global variables.
  void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);

  // Verify NUMA-related properties of the heap regions.
  void verify_numa_regions(const char* desc);

public:
  G1ServiceThread* service_thread() const { return _service_thread; }

  WorkGang* workers() const { return _workers; }

  // Runs the given AbstractGangTask with the current active workers,
  // returning the total time taken.
  Tickspan run_task_timed(AbstractGangTask* task);
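  // Illustrative use (sketch, not taken from this file): a pause phase might
  // time its parallel work with something like
  //   Tickspan elapsed = run_task_timed(&some_gang_task);
  // where "some_gang_task" is a hypothetical AbstractGangTask subclass.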

  G1Allocator* allocator() {
    return _allocator;
  }

  G1HeapVerifier* verifier() {
    return _verifier;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }
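  // (Descriptive note: the monitoring support object backs the MemoryPool and
  // MemoryManager instances forward-declared above, i.e. the statistics
  // exposed through the Java memory management APIs.)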

  // ...

  // Closure supplied to the concurrent-marking reference processor as its
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional, but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

  G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
public:

  // The scanner task queue for the worker with the given index.
  G1ScannerTasksQueue* task_queue(uint i) const;

  // The number of task queues (normally one per GC worker thread).
  uint num_task_queues() const;
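  // Illustrative use (sketch): code that visits every queue typically does
  //   for (uint i = 0; i < num_task_queues(); i++) {
  //     G1ScannerTasksQueue* q = task_queue(i);
  //     ...
  //   }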

  // Create a G1CollectedHeap.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap();

private:
  jint initialize_concurrent_refinement();
  jint initialize_service_thread();
public:
  // Initialize the G1CollectedHeap to have the initial and maximum sizes
  // and the remembered and barrier sets specified by the policy object.
  jint initialize();

  virtual void stop();
  virtual void safepoint_synchronize_begin();
  virtual void safepoint_synchronize_end();

  // Perform any operations required after initialization has completed.
  void post_initialize();
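  // Illustrative lifecycle (sketch): the heap is constructed and brought up
  // roughly as
  //   G1CollectedHeap* heap = new G1CollectedHeap();
  //   if (heap->initialize() != JNI_OK) { /* creation failed */ }
  //   heap->post_initialize();
  // How the surrounding runtime actually drives these steps is not shown in
  // this excerpt.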

  // Initialize weak reference processing.
  void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::G1;
  }
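  // (Descriptive note: kind() lets shared CollectedHeap code identify this
  // heap as G1, e.g. when performing checked downcasts from CollectedHeap*.)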