};

 private:
  Generation* _young_gen;
  Generation* _old_gen;

  // The singleton CardTable Remembered Set.
  CardTableRS* _rem_set;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;

  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  WorkGang* _workers;

 protected:
  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for the two callbacks below.
  // Considers collection of generations up to and including max_generation.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     GenerationType max_generation);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of generations up to and including max_generation.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

 public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  WorkGang* workers() const { return _workers; }

  // Returns JNI_OK on success
  virtual jint initialize();

  // Reserve aligned space for the heap as needed by the contained generations.
  char* allocate(size_t alignment, ReservedSpace* heap_rs);

  // Performs operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  virtual const char* name() const {
    if (UseConcMarkSweepGC) {
      return "Concurrent Mark Sweep";
    } else {
      return "Serial";
    }
  }

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen() const { return _old_gen; }

  bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
  bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
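  // Illustrative caller pattern (a sketch only; "gch" and "word_size" are
  // placeholder names, and real callers handle out-of-memory differently):
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = gch->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL && gc_overhead_limit_was_exceeded) {
  //     // the collector spent too much time in GC; callers typically surface
  //     // this as a "GC overhead limit exceeded" OutOfMemoryError
  //   }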

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_generation.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, GenerationType max_generation);
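  // For example (a sketch; "gch" stands for GenCollectedHeap::heap()):
  //   gch->collect(GCCause::_java_lang_system_gc);                          // whole heap
  //   gch->collect(GCCause::_allocation_failure, GenCollectedHeap::YoungGen); // young only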

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product jvm's, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // Override.
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes that the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate_no_header(OopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;
  // ...

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to DefNew+Tenured and ParNew+CMS
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say). Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

  // ...
  }

  // Update the gc statistics for each generation.
  void update_gc_stats(Generation* current_generation, bool full) {
    _old_gen->update_gc_stats(current_generation, full);
  }

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;
  virtual void print_on_error(outputStream* st) const;

  void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap.
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
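  // Illustrative use (a sketch; assumes Generation::capacity() reports the
  // generation's committed size):
  //   class SumCapacityClosure : public GenCollectedHeap::GenClosure {
  //    public:
  //     size_t total;
  //     SumCapacityClosure() : total(0) {}
  //     virtual void do_generation(Generation* gen) { total += gen->capacity(); }
  //   };
  //   SumCapacityClosure blk;
  //   GenCollectedHeap::heap()->generation_iterate(&blk, false /* young-to-old */);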

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // This function returns the CardTableRS object that allows us to scan
  // generations in a fully generational heap.
  CardTableRS* rem_set() { return _rem_set; }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  // The ScanningOption determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  enum ScanningOption {
    SO_None              =  0x0,
    SO_AllCodeCache      =  0x8,
    SO_ScavengeCodeCache = 0x10
  };
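  // The options are bit flags; root processing is expected to test them with
  // a mask, e.g. (sketch):
  //   if ((so & SO_ScavengeCodeCache) != 0) {
  //     // scan only nmethods that point into the young generation
  //   }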

 private:
  void process_roots(StrongRootsScope* scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobToOopClosure* code_roots);

  void process_string_table_roots(StrongRootsScope* scope,
                                  OopClosure* root_closure);

 public:
  void young_process_roots(StrongRootsScope* scope,
                           OopsInGenClosure* root_closure,
                           OopsInGenClosure* old_gen_closure,
                           CLDClosure* cld_closure);

  // If "young_gen_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself. (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)
  void cms_process_roots(StrongRootsScope* scope,
                         bool young_gen_as_roots,
                         ScanningOption so,
                         bool only_strong_roots,
                         OopsInGenClosure* root_closure,
                         CLDClosure* cld_closure);

  void full_process_roots(StrongRootsScope* scope,
                          bool is_adjust_phase,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* root_closure,
                          CLDClosure* cld_closure);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "level". The "cur" closure is
  // ...

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }
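  // Typical use (a sketch): a young-generation collector consults this flag
  // when deciding whether a scavenge can safely be attempted, e.g.
  //   if (gch->incremental_collection_failed()) {
  //     // skip the scavenge and let a full collection run instead
  //   }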

  // Promotion of obj into gen failed. Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in both the young and old
  // generation; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);
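  // Illustrative caller (a sketch; real promotion-failure handling is more
  // involved than this):
  //   oop new_loc = gch->handle_failed_promotion(old_gen, obj, obj->size());
  //   if (new_loc == NULL) {
  //     gch->set_incremental_collection_failed();
  //   }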

 private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the generations up to and including max_generation.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, GenerationType max_generation);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

 protected:
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

 public:
  void stop();
};

#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
};

 private:
  Generation* _young_gen;
  Generation* _old_gen;

  // The singleton CardTable Remembered Set.
  CardTableRS* _rem_set;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

 protected:

  // The set of potentially parallel tasks in root scanning.
  enum GCH_strong_roots_tasks {
    GCH_PS_Universe_oops_do,
    GCH_PS_JNIHandles_oops_do,
    GCH_PS_ObjectSynchronizer_oops_do,
    GCH_PS_FlatProfiler_oops_do,
    GCH_PS_Management_oops_do,
    GCH_PS_SystemDictionary_oops_do,
    GCH_PS_ClassLoaderDataGraph_oops_do,
    GCH_PS_jvmti_oops_do,
    GCH_PS_CodeCache_oops_do,
    GCH_PS_aot_oops_do,
    GCH_PS_younger_gens,
    // Leave this one last.
    GCH_PS_NumElements
  };
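  // Each parallel worker claims tasks from this set so that every root group
  // is processed exactly once. A claiming step looks roughly like this
  // (a sketch; assumes the SubTasksDone::is_task_claimed() API of this era):
  //   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
  //     Universe::oops_do(strong_roots);
  //   }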

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;

  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for the two callbacks below.
  // Considers collection of generations up to and including max_generation.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     GenerationType max_generation);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of generations up to and including max_generation.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

 public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  // Returns JNI_OK on success
  virtual jint initialize();

  // Reserve aligned space for the heap as needed by the contained generations.
  char* allocate(size_t alignment, ReservedSpace* heap_rs);

  // Performs operations required after initialization has been done.
  void post_initialize();

  virtual void check_gen_kinds();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  virtual const char* name() const {
    return "Serial";
  }

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen() const { return _old_gen; }

  bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
  bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  virtual void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_generation.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, GenerationType max_generation);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product jvm's, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // Override.
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in(p);
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes that the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate_no_header(OopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;
  // ...

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to DefNew+Tenured and ParNew+CMS
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say). Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
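  // Illustrative consumption of the returned list (a sketch; assumes
  // ScratchBlock is the {next, num_words, ...} node declared alongside
  // Generation):
  //   for (ScratchBlock* b = gch->gather_scratch(requestor, max_alloc_words);
  //        b != NULL; b = b->next) {
  //     // carve scratch space out of b->num_words words starting at b
  //   }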
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

  // ...
  }

  // Update the gc statistics for each generation.
  void update_gc_stats(Generation* current_generation, bool full) {
    _old_gen->update_gc_stats(current_generation, full);
  }

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap.
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // This function returns the CardTableRS object that allows us to scan
  // generations in a fully generational heap.
  CardTableRS* rem_set() { return _rem_set; }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  // The ScanningOption determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  enum ScanningOption {
    SO_None              =  0x0,
    SO_AllCodeCache      =  0x8,
    SO_ScavengeCodeCache = 0x10
  };

 protected:
  void process_roots(StrongRootsScope* scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobToOopClosure* code_roots);

  void process_string_table_roots(StrongRootsScope* scope,
                                  OopClosure* root_closure);

  // Accessor for memory state verification support
  NOT_PRODUCT(
    virtual size_t skip_header_HeapWords() { return 0; }
  )

  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

 public:
  void young_process_roots(StrongRootsScope* scope,
                           OopsInGenClosure* root_closure,
                           OopsInGenClosure* old_gen_closure,
                           CLDClosure* cld_closure);

  void full_process_roots(StrongRootsScope* scope,
                          bool is_adjust_phase,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* root_closure,
                          CLDClosure* cld_closure);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "level". The "cur" closure is
  // ...

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed. Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in both the young and old
  // generation; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);

 private:
  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the generations up to and including max_generation.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, GenerationType max_generation);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP