  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;

  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  WorkGang* _workers;

protected:
  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for the two callbacks below.
  // Considers collection of generations up to and including max_generation.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     GenerationType max_generation);

  // Callback from the VM_GenCollectForAllocation operation.
  // Does everything necessary/possible to satisfy an allocation request
  // that failed in the young generation, which should have handled it
  // (including collection, expansion, etc.).
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
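
  // Illustrative sketch (a simplified assumption, not the exact HotSpot
  // code): the allocation slow path ties these helpers together.
  // mem_allocate() first tries attempt_allocation(); if that fails, it
  // submits a VM operation, which reaches satisfy_failed_allocation()
  // at a safepoint:
  //
  //   HeapWord* result = attempt_allocation(size, is_tlab, false /* first_only */);
  //   if (result == NULL) {
  //     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
  //     VMThread::execute(&op);  // ends up calling satisfy_failed_allocation()
  //     result = op.result();
  //   }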

  // Callback from the VM_GenCollectFull operation.
  // Perform a full collection of generations up to and including max_generation.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);

  // Does the "cause" of the GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy* policy);

  WorkGang* workers() const { return _workers; }

  // Returns JNI_OK on success
  virtual jint initialize();

  // Reserve aligned space for the heap as needed by the contained generations.
  char* allocate(size_t alignment, ReservedSpace* heap_rs);

  // Performs operations required after initialization has completed.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  virtual const char* name() const {
    if (UseConcMarkSweepGC) {
      return "Concurrent Mark Sweep";
    } else {
      return "Serial";
    }
  }

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen() const { return _old_gen; }

  bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
  bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;
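
  // Illustrative sketch (an assumption about the generated fast path, not
  // the exact compiled code): top_addr()/end_addr() expose the young
  // generation's bump pointer so compilers can inline contiguous allocation:
  //
  //   HeapWord* volatile* top = top_addr();
  //   HeapWord** end = end_addr();
  //   HeapWord* obj = *top;
  //   if (obj + size <= *end &&
  //       Atomic::cmpxchg_ptr(obj + size, top, obj) == obj) {
  //     // success: initialize the object at obj
  //   } else {
  //     // contention or exhaustion: fall back to the slow path (mem_allocate)
  //   }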

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_generation.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, GenerationType max_generation);
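
  // Illustrative usage sketch (a simplified assumption about call sites):
  //
  //   // System.gc() support; caller does not hold the Heap_lock:
  //   GenCollectedHeap::heap()->collect(GCCause::_java_lang_system_gc);
  //
  //   // Testing: collect generations only up to and including the young gen:
  //   GenCollectedHeap::heap()->collect(GCCause::_wb_young_gc, YoungGen);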

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent their inadvertent
  // use in product JVMs, we restrict their use to assertion checking or
  // verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes that the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate_no_header(OopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }
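
  // Illustrative sketch (pseudocode assumption, not an actual HotSpot API):
  // within that window, compiled code may publish initializing stores to a
  // fresh TLAB object without card marks:
  //
  //   oop obj = allocate_from_tlab(klass, size);  // hypothetical helper
  //   store_field(obj, offset, value);            // initializing store, no card mark
  //   // ... valid only while no safepoint intervenes before the stores complete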

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to DefNew+Tenured and ParNew+CMS
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say). Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
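
  // Illustrative sketch (an assumption about typical use, not a quote of any
  // caller): a requestor can walk the returned list, which is sorted by
  // decreasing size, taking blocks until its need is met:
  //
  //   size_t still_needed = needed_words;
  //   ScratchBlock* b = GenCollectedHeap::heap()->gather_scratch(requestor, max_words);
  //   for (; b != NULL && still_needed > 0; b = b->next) {
  //     use_as_scratch(b, b->num_words);  // hypothetical helper
  //     still_needed -= MIN2(still_needed, b->num_words);
  //   }
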
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the generations up to and including max_generation.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, GenerationType max_generation);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);
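
  // Illustrative sketch (a simplified assumption of how collect() dispatches
  // when ExplicitGCInvokesConcurrent is in effect; not the exact code):
  //
  //   if (should_do_concurrent_full_gc(cause)) {
  //     collect_mostly_concurrent(cause);  // let CMS do most work concurrently
  //   } else {
  //     collect_locked(cause, OldGen);     // stop-the-world full collection
  //   }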

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

public:
  void stop();
};

#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP