16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
26 #define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
27
28 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
29 #include "memory/collectorPolicy.hpp"
30 #include "memory/generation.hpp"
31 #include "memory/sharedHeap.hpp"
32
33 class SubTasksDone;
34
35 // A "GenCollectedHeap" is a SharedHeap that uses generational
36 // collection. It is represented as a sequence of Generations.
37 class GenCollectedHeap : public SharedHeap {
38 friend class GenCollectorPolicy;
39 friend class Generation;
40 friend class DefNewGeneration;
41 friend class TenuredGeneration;
42 friend class ConcurrentMarkSweepGeneration;
43 friend class CMSCollector;
44 friend class GenMarkSweep;
45 friend class VM_GenCollectForAllocation;
46 friend class VM_GenCollectFull;
47 friend class VM_GenCollectFullConcurrent;
48 friend class VM_GC_HeapInspection;
49 friend class VM_HeapDumper;
50 friend class HeapInspection;
51 friend class GCCauseSetter;
52 friend class VMStructs;
53 public:
54 enum SomeConstants {
55 max_gens = 10
56 };
57
58 friend class VM_PopulateDumpSharedSpace;
59
60 protected:
61 // Fields:
62 static GenCollectedHeap* _gch;
63
64 private:
65 int _n_gens;
66 Generation* _gens[max_gens];
67 GenerationSpec** _gen_specs;
68
69 // The generational collector policy.
70 GenCollectorPolicy* _gen_policy;
71
72 // Indicates that the most recent previous incremental collection failed.
73 // The flag is cleared when an action is taken that might clear the
74 // condition that caused that incremental collection to fail.
75 bool _incremental_collection_failed;
76
77 // In support of ExplicitGCInvokesConcurrent functionality
78 unsigned int _full_collections_completed;
79
80 // Data structure for claiming the (potentially) parallel tasks in
81 // (gen-specific) roots processing.
82 SubTasksDone* _gen_process_roots_tasks;
83 SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
84
85 // In block contents verification, the number of header words to skip
86 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
87
88 protected:
89 // Helper functions for allocation
90 HeapWord* attempt_allocation(size_t size,
91 bool is_tlab,
92 bool first_only);
93
94 // Helper function for two callbacks below.
95 // Considers collection of the first max_level+1 generations.
96 void do_collection(bool full,
97 bool clear_all_soft_refs,
98 size_t size,
99 bool is_tlab,
100 int max_level);
101
102 // Callback from VM_GenCollectForAllocation operation.
103 // This function does everything necessary/possible to satisfy an
104 // allocation request that failed in the youngest generation that should
105 // have handled it (including collection, expansion, etc.)
106 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
107
108 // Callback from VM_GenCollectFull operation.
109 // Perform a full collection of the first max_level+1 generations.
110 virtual void do_full_collection(bool clear_all_soft_refs);
111 void do_full_collection(bool clear_all_soft_refs, int max_level);
112
113 // Does the "cause" of GC indicate that
114 // we absolutely __must__ clear soft refs?
115 bool must_clear_all_soft_refs();
116
117 public:
118 GenCollectedHeap(GenCollectorPolicy *policy);
119
120 GCStats* gc_stats(int level) const;
121
122 // Returns JNI_OK on success
123 virtual jint initialize();
124 char* allocate(size_t alignment,
125 size_t* _total_reserved, int* _n_covered_regions,
126 ReservedSpace* heap_rs);
127
128 // Does operations required after initialization has been done.
129 void post_initialize();
130
131 // Initialize ("weak") refs processing support
132 virtual void ref_processing_init();
133
134 virtual CollectedHeap::Name kind() const {
135 return CollectedHeap::GenCollectedHeap;
136 }
137
138 // The generational collector policy.
139 GenCollectorPolicy* gen_policy() const { return _gen_policy; }
140 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }
141
142 // Adaptive size policy
143 virtual AdaptiveSizePolicy* size_policy() {
144 return gen_policy()->size_policy();
145 }
146
147 // Return the (conservative) maximum heap alignment
148 static size_t conservative_max_heap_alignment() {
149 return Generation::GenGrain;
150 }
151
152 size_t capacity() const;
153 size_t used() const;
154
155 // Save the "used_region" for generations at "level" and lower.
156 void save_used_regions(int level);
157
158 size_t max_capacity() const;
159
160 HeapWord* mem_allocate(size_t size,
161 bool* gc_overhead_limit_was_exceeded);
162
163 // We may support a shared contiguous allocation area, if the youngest
164 // generation does.
165 bool supports_inline_contig_alloc() const;
166 HeapWord** top_addr() const;
167 HeapWord** end_addr() const;
168
169 // Does this heap support heap inspection? (+PrintClassHistogram)
170 virtual bool supports_heap_inspection() const { return true; }
171
172 // Perform a full collection of the heap; intended for use in implementing
173 // "System.gc". This implies as full a collection as the CollectedHeap
174 // supports. Caller does not hold the Heap_lock on entry.
175 void collect(GCCause::Cause cause);
176
177 // The same as above but assume that the caller holds the Heap_lock.
178 void collect_locked(GCCause::Cause cause);
179
180 // Perform a full collection of the first max_level+1 generations.
181 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
182 void collect(GCCause::Cause cause, int max_level);
183
184 // Returns "TRUE" iff "p" points into the committed areas of the heap.
185 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
186 // be expensive to compute in general, so, to prevent
187 // their inadvertent use in product jvm's, we restrict their use to
188 // assertion checking or verification only.
189 bool is_in(const void* p) const;
190
191 // override
192 bool is_in_closed_subset(const void* p) const {
193 if (UseConcMarkSweepGC) {
194 return is_in_reserved(p);
195 } else {
196 return is_in(p);
197 }
198 }
199
200 // Returns true if the reference is to an object in the reserved space
201 // for the young generation.
202 // Assumes that the young gen address range is less than that of the old gen.
289
290 // Ensure parsability: override
291 virtual void ensure_parsability(bool retire_tlabs);
292
293 // Time in ms since the longest time a collector ran in
294 // any generation.
295 virtual jlong millis_since_last_gc();
296
297 // Total number of full collections completed.
298 unsigned int total_full_collections_completed() {
299 assert(_full_collections_completed <= _total_full_collections,
300 "Can't complete more collections than were started");
301 return _full_collections_completed;
302 }
303
304 // Update above counter, as appropriate, at the end of a stop-world GC cycle
305 unsigned int update_full_collections_completed();
306 // Update above counter, as appropriate, at the end of a concurrent GC cycle
307 unsigned int update_full_collections_completed(unsigned int count);
308
309 // Update "time of last gc" for all constituent generations
310 // to "now".
311 void update_time_of_last_gc(jlong now) {
312 for (int i = 0; i < _n_gens; i++) {
313 _gens[i]->update_time_of_last_gc(now);
314 }
315 }
316
317 // Update the gc statistics for each generation.
318 // "level" is the level of the latest collection.
319 void update_gc_stats(int current_level, bool full) {
320 for (int i = 0; i < _n_gens; i++) {
321 _gens[i]->update_gc_stats(current_level, full);
322 }
323 }
324
325 // Override.
326 bool no_gc_in_progress() { return !is_gc_active(); }
327
328 // Override.
329 void prepare_for_verify();
330
331 // Override.
332 void verify(bool silent, VerifyOption option);
333
334 // Override.
335 virtual void print_on(outputStream* st) const;
336 virtual void print_gc_threads_on(outputStream* st) const;
337 virtual void gc_threads_do(ThreadClosure* tc) const;
338 virtual void print_tracing_info() const;
339 virtual void print_on_error(outputStream* st) const;
340
341 // PrintGC, PrintGCDetails support
342 void print_heap_change(size_t prev_used) const;
343
344 // The functions below are helper functions that a subclass of
345 // "CollectedHeap" can use in the implementation of its virtual
346 // functions.
347
348 class GenClosure : public StackObj {
349 public:
350 virtual void do_generation(Generation* gen) = 0;
351 };
352
353 // Apply "cl.do_generation" to all generations in the heap;
354 // "old_to_young" determines the order.
355 void generation_iterate(GenClosure* cl, bool old_to_young);
356
357 void space_iterate(SpaceClosure* cl);
358
359 // Return "true" if all generations have reached the
360 // maximal committed limit that they can reach, without a garbage
361 // collection.
362 virtual bool is_maximal_no_gc() const;
363
364 // Return the generation before "gen".
365 Generation* prev_gen(Generation* gen) const {
366 int l = gen->level();
367 guarantee(l > 0, "Out of bounds");
368 return _gens[l-1];
369 }
370
371 // Return the generation after "gen".
372 Generation* next_gen(Generation* gen) const {
373 int l = gen->level() + 1;
374 guarantee(l < _n_gens, "Out of bounds");
375 return _gens[l];
376 }
377
378 Generation* get_gen(int i) const {
379 guarantee(i >= 0 && i < _n_gens, "Out of bounds");
380 return _gens[i];
381 }
382
383 int n_gens() const {
384 assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
385 return _n_gens;
386 }
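// [Editorial sketch, not part of the original header] With the indexed
// accessors above, callers in this multi-generation version typically walk
// the generations by level; the loop and the assertion are illustrative only.
//
//   GenCollectedHeap* gch = GenCollectedHeap::heap();
//   for (int i = 0; i < gch->n_gens(); i++) {
//     Generation* gen = gch->get_gen(i);
//     assert(gen->level() == i, "generation level should match its index");
//   }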
387
388 // Convenience function to be used in situations where the heap type can be
389 // asserted to be this type.
390 static GenCollectedHeap* heap();
391
392 void set_par_threads(uint t);
393
394 // Invoke the "do_oop" method of one of the closures "not_older_gens"
395 // or "older_gens" on root locations for the generation at
396 // "level". (The "older_gens" closure is used for scanning references
397 // from older generations; "not_older_gens" is used everywhere else.)
398 // If "younger_gens_as_roots" is false, younger generations are
399 // not scanned as roots; in this case, the caller must be arranging to
400 // scan the younger generations itself. (For example, a generation might
401 // explicitly mark reachable objects in younger generations, to avoid
402 // excess storage retention.)
403 // The "so" argument determines which of the roots
404 // the closure is applied to:
405 // "SO_None" does none;
406 private:
407 void gen_process_roots(int level,
408 bool younger_gens_as_roots,
409 bool activate_scope,
410 SharedHeap::ScanningOption so,
411 OopsInGenClosure* not_older_gens,
412 OopsInGenClosure* weak_roots,
413 OopsInGenClosure* older_gens,
414 CLDClosure* cld_closure,
415 CLDClosure* weak_cld_closure,
416 CodeBlobClosure* code_closure);
417
418 public:
419 static const bool StrongAndWeakRoots = false;
420 static const bool StrongRootsOnly = true;
421
422 void gen_process_roots(int level,
423 bool younger_gens_as_roots,
424 bool activate_scope,
425 SharedHeap::ScanningOption so,
426 bool only_strong_roots,
427 OopsInGenClosure* not_older_gens,
428 OopsInGenClosure* older_gens,
429 CLDClosure* cld_closure);
430
431 // Apply "root_closure" to all the weak roots of the system.
432 // These include JNI weak roots, string table,
433 // and referents of reachable weak refs.
434 void gen_process_weak_roots(OopClosure* root_closure);
435
436 // Set the saved marks of generations, if that makes sense.
437 // In particular, if any generation might iterate over the oops
438 // in other generations, it should call this method.
439 void save_marks();
440
441 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
442 // allocated since the last call to save_marks in generations at or above
443 // "level". The "cur" closure is
444 // applied to references in the generation at "level", and the "older"
445 // closure to older generations.
446 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
447 void oop_since_save_marks_iterate(int level, \
448 OopClosureType* cur, \
449 OopClosureType* older);
450
451 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
452
453 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
454
455 // Returns "true" iff no allocations have occurred in any generation at
456 // "level" or above since the last
457 // call to "save_marks".
458 bool no_allocs_since_save_marks(int level);
459
460 // Returns true if an incremental collection is likely to fail.
461 // We optionally consult the young gen, if asked to do so;
462 // otherwise we base our answer on whether the previous incremental
463 // collection attempt failed with no corrective action as of yet.
464 bool incremental_collection_will_fail(bool consult_young) {
465 // Assumes a 2-generation system; the first disjunct remembers if an
466 // incremental collection failed, even when we thought (second disjunct)
467 // that it would not.
468 assert(heap()->collector_policy()->is_generation_policy(),
469 "the following definition may not be suitable for an n(>2)-generation system");
470 return incremental_collection_failed() ||
471 (consult_young && !get_gen(0)->collection_attempt_is_safe());
472 }
473
474 // If a generation bails out of an incremental collection,
475 // it sets this flag.
476 bool incremental_collection_failed() const {
477 return _incremental_collection_failed;
478 }
479 void set_incremental_collection_failed() {
480 _incremental_collection_failed = true;
481 }
482 void clear_incremental_collection_failed() {
483 _incremental_collection_failed = false;
484 }
485
486 // Promotion of obj into gen failed. Try to promote obj to higher
487 // gens in ascending order; return the new location of obj if successful.
488 // Otherwise, try expand-and-allocate for obj in both the young and old
489 // generation; return the new location of obj if successful. Otherwise, return NULL.
490 oop handle_failed_promotion(Generation* old_gen,
491 oop obj,
492 size_t obj_size);
493
494 private:
495 // Accessor for memory state verification support
496 NOT_PRODUCT(
497 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
498 )
499
500 // Override
501 void check_for_non_bad_heap_word_value(HeapWord* addr,
502 size_t size) PRODUCT_RETURN;
503
504 // For use by mark-sweep. As implemented, mark-sweep-compact is global
505 // in an essential way: compaction is performed across generations, by
506 // iterating over spaces.
507 void prepare_for_compaction();
508
509 // Perform a full collection of the first max_level+1 generations.
510 // This is the low level interface used by the public versions of
511 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
512 void collect_locked(GCCause::Cause cause, int max_level);
513
514 // Returns success or failure.
515 bool create_cms_collector();
516
517 // In support of ExplicitGCInvokesConcurrent functionality
518 bool should_do_concurrent_full_gc(GCCause::Cause cause);
519 void collect_mostly_concurrent(GCCause::Cause cause);
520
521 // Save the tops of the spaces in all generations
522 void record_gen_tops_before_GC() PRODUCT_RETURN;
523
524 protected:
525 virtual void gc_prologue(bool full);
526 virtual void gc_epilogue(bool full);
527 };
528
529 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
26 #define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
27
28 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
29 #include "memory/collectorPolicy.hpp"
30 #include "memory/generation.hpp"
31 #include "memory/sharedHeap.hpp"
32
33 class SubTasksDone;
34
35 // A "GenCollectedHeap" is a SharedHeap that uses generational
36 // collection. It has two generations, young and old.
37 class GenCollectedHeap : public SharedHeap {
38 friend class GenCollectorPolicy;
39 friend class Generation;
40 friend class DefNewGeneration;
41 friend class TenuredGeneration;
42 friend class ConcurrentMarkSweepGeneration;
43 friend class CMSCollector;
44 friend class GenMarkSweep;
45 friend class VM_GenCollectForAllocation;
46 friend class VM_GenCollectFull;
47 friend class VM_GenCollectFullConcurrent;
48 friend class VM_GC_HeapInspection;
49 friend class VM_HeapDumper;
50 friend class HeapInspection;
51 friend class GCCauseSetter;
52 friend class VMStructs;
53 public:
54 friend class VM_PopulateDumpSharedSpace;
55
56 protected:
57 // Fields:
58 static GenCollectedHeap* _gch;
59
60 private:
61 Generation* _young_gen;
62 Generation* _old_gen;
63
64 // The generational collector policy.
65 GenCollectorPolicy* _gen_policy;
66
67 // Indicates that the most recent previous incremental collection failed.
68 // The flag is cleared when an action is taken that might clear the
69 // condition that caused that incremental collection to fail.
70 bool _incremental_collection_failed;
71
72 // In support of ExplicitGCInvokesConcurrent functionality
73 unsigned int _full_collections_completed;
74
75 // Data structure for claiming the (potentially) parallel tasks in
76 // (gen-specific) roots processing.
77 SubTasksDone* _gen_process_roots_tasks;
78 SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
79
80 void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
81 bool run_verification, bool clear_soft_refs);
82
83 // In block contents verification, the number of header words to skip
84 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
85
86 protected:
87 // Helper functions for allocation
88 HeapWord* attempt_allocation(size_t size,
89 bool is_tlab,
90 bool first_only);
91
92 // Helper function for two callbacks below.
93 // Considers collection of the generations up to and including max_generation.
94 void do_collection(bool full,
95 bool clear_all_soft_refs,
96 size_t size,
97 bool is_tlab,
98 Generation::Type max_generation);
99
100 // Callback from VM_GenCollectForAllocation operation.
101 // This function does everything necessary/possible to satisfy an
102 // allocation request that failed in the youngest generation that should
103 // have handled it (including collection, expansion, etc.)
104 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
105
106 // Callback from VM_GenCollectFull operation.
107 // Perform a full collection of the generations up to and including max_gen.
108 virtual void do_full_collection(bool clear_all_soft_refs);
109 void do_full_collection(bool clear_all_soft_refs, Generation::Type max_gen);
110
111 // Does the "cause" of GC indicate that
112 // we absolutely __must__ clear soft refs?
113 bool must_clear_all_soft_refs();
114
115 public:
116 GenCollectedHeap(GenCollectorPolicy *policy);
117
118 GCStats* gc_stats(Generation* gen) const;
119
120 // Returns JNI_OK on success
121 virtual jint initialize();
122
123 char* allocate(size_t alignment,
124 size_t* _total_reserved, int* _n_covered_regions,
125 ReservedSpace* heap_rs);
126
127 // Does operations required after initialization has been done.
128 void post_initialize();
129
130 // Initialize ("weak") refs processing support
131 virtual void ref_processing_init();
132
133 virtual CollectedHeap::Name kind() const {
134 return CollectedHeap::GenCollectedHeap;
135 }
136
137 Generation* young_gen() const { return _young_gen; }
138 Generation* old_gen() const { return _old_gen; }
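// [Editorial sketch, not part of the original header] In this two-generation
// version the indexed _gens[] array is gone; code that used to index by level
// asks for the specific generation instead. Illustrative only:
//
//   GenCollectedHeap* gch = GenCollectedHeap::heap();
//   Generation* young = gch->young_gen();
//   Generation* old   = gch->old_gen();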
139
140 // The generational collector policy.
141 GenCollectorPolicy* gen_policy() const { return _gen_policy; }
142
143 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }
144
145 // Adaptive size policy
146 virtual AdaptiveSizePolicy* size_policy() {
147 return gen_policy()->size_policy();
148 }
149
150 // Return the (conservative) maximum heap alignment
151 static size_t conservative_max_heap_alignment() {
152 return Generation::GenGrain;
153 }
154
155 size_t capacity() const;
156 size_t used() const;
157
158 // Save the "used_region" for both generations.
159 void save_used_regions();
160
161 size_t max_capacity() const;
162
163 HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
164
165 // We may support a shared contiguous allocation area, if the youngest
166 // generation does.
167 bool supports_inline_contig_alloc() const;
168 HeapWord** top_addr() const;
169 HeapWord** end_addr() const;
170
171 // Does this heap support heap inspection? (+PrintClassHistogram)
172 virtual bool supports_heap_inspection() const { return true; }
173
174 // Perform a full collection of the heap; intended for use in implementing
175 // "System.gc". This implies as full a collection as the CollectedHeap
176 // supports. Caller does not hold the Heap_lock on entry.
177 void collect(GCCause::Cause cause);
178
179 // The same as above but assume that the caller holds the Heap_lock.
180 void collect_locked(GCCause::Cause cause);
181
182 // Perform a full collection of generations up to and including max_gen.
183 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
184 void collect(GCCause::Cause cause, Generation::Type max_gen);
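// [Editorial sketch, not part of the original header] An explicit GC request
// (e.g. System.gc()) ultimately reaches the first collect() overload above;
// the GC cause shown is the standard one for that path. Illustrative only:
//
//   GenCollectedHeap::heap()->collect(GCCause::_java_lang_system_gc);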
185
186 // Returns "TRUE" iff "p" points into the committed areas of the heap.
187 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
188 // be expensive to compute in general, so, to prevent
189 // their inadvertent use in product jvm's, we restrict their use to
190 // assertion checking or verification only.
191 bool is_in(const void* p) const;
192
193 // override
194 bool is_in_closed_subset(const void* p) const {
195 if (UseConcMarkSweepGC) {
196 return is_in_reserved(p);
197 } else {
198 return is_in(p);
199 }
200 }
201
202 // Returns true if the reference is to an object in the reserved space
203 // for the young generation.
204 // Assumes that the young gen address range is less than that of the old gen.
291
292 // Ensure parsability: override
293 virtual void ensure_parsability(bool retire_tlabs);
294
295 // Time in ms since the longest time a collector ran in
296 // any generation.
297 virtual jlong millis_since_last_gc();
298
299 // Total number of full collections completed.
300 unsigned int total_full_collections_completed() {
301 assert(_full_collections_completed <= _total_full_collections,
302 "Can't complete more collections than were started");
303 return _full_collections_completed;
304 }
305
306 // Update above counter, as appropriate, at the end of a stop-world GC cycle
307 unsigned int update_full_collections_completed();
308 // Update above counter, as appropriate, at the end of a concurrent GC cycle
309 unsigned int update_full_collections_completed(unsigned int count);
310
311 // Update "time of last gc" for all generations to "now".
312 void update_time_of_last_gc(jlong now) {
313 _young_gen->update_time_of_last_gc(now);
314 _old_gen->update_time_of_last_gc(now);
315 }
316
317 // Update the gc statistics for each generation.
318 void update_gc_stats(Generation* current_generation, bool full) {
319 _old_gen->update_gc_stats(current_generation, full);
320 }
321
322 // Override.
323 bool no_gc_in_progress() { return !is_gc_active(); }
324
325 // Override.
326 void prepare_for_verify();
327
328 // Override.
329 void verify(bool silent, VerifyOption option);
330
331 // Override.
332 virtual void print_on(outputStream* st) const;
333 virtual void print_gc_threads_on(outputStream* st) const;
334 virtual void gc_threads_do(ThreadClosure* tc) const;
335 virtual void print_tracing_info() const;
336 virtual void print_on_error(outputStream* st) const;
337
338 // PrintGC, PrintGCDetails support
339 void print_heap_change(size_t prev_used) const;
340
341 // The functions below are helper functions that a subclass of
342 // "CollectedHeap" can use in the implementation of its virtual
343 // functions.
344
345 class GenClosure : public StackObj {
346 public:
347 virtual void do_generation(Generation* gen) = 0;
348 };
349
350 // Apply "cl.do_generation" to all generations in the heap;
351 // "old_to_young" determines the order.
352 void generation_iterate(GenClosure* cl, bool old_to_young);
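// [Editorial sketch, not part of the original header] A minimal GenClosure
// subclass driven by generation_iterate(); the closure name is hypothetical
// and the timestamp source (os::javaTimeMillis()) is an assumption here.
//
//   class UpdateGCTimeClosure : public GenCollectedHeap::GenClosure {
//     jlong _now;
//    public:
//     UpdateGCTimeClosure(jlong now) : _now(now) {}
//     void do_generation(Generation* gen) { gen->update_time_of_last_gc(_now); }
//   };
//
//   UpdateGCTimeClosure cl(os::javaTimeMillis());
//   GenCollectedHeap::heap()->generation_iterate(&cl, false /* young to old */);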
353
354 void space_iterate(SpaceClosure* cl);
355
356 // Return "true" if all generations have reached the
357 // maximal committed limit that they can reach, without a garbage
358 // collection.
359 virtual bool is_maximal_no_gc() const;
360
361 // Convenience function to be used in situations where the heap type can be
362 // asserted to be this type.
363 static GenCollectedHeap* heap();
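// [Editorial sketch, not part of the original header] The usual pattern at
// call sites that already know the heap is generational; illustrative only.
//
//   GenCollectedHeap* gch = GenCollectedHeap::heap();
//   assert(gch->kind() == CollectedHeap::GenCollectedHeap, "sanity");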
364
365 void set_par_threads(uint t);
366
367 // Invoke the "do_oop" method of one of the closures "not_older_gens"
368 // or "older_gens" on root locations for the generation of the given
369 // "type". (The "older_gens" closure is used for scanning references
370 // from older generations; "not_older_gens" is used everywhere else.)
371 // If "younger_gens_as_roots" is false, younger generations are
372 // not scanned as roots; in this case, the caller must be arranging to
373 // scan the younger generations itself. (For example, a generation might
374 // explicitly mark reachable objects in younger generations, to avoid
375 // excess storage retention.)
376 // The "so" argument determines which of the roots
377 // the closure is applied to:
378 // "SO_None" does none;
379 private:
380 void gen_process_roots(Generation::Type type,
381 bool younger_gens_as_roots,
382 bool activate_scope,
383 SharedHeap::ScanningOption so,
384 OopsInGenClosure* not_older_gens,
385 OopsInGenClosure* weak_roots,
386 OopsInGenClosure* older_gens,
387 CLDClosure* cld_closure,
388 CLDClosure* weak_cld_closure,
389 CodeBlobClosure* code_closure);
390
391 public:
392 static const bool StrongAndWeakRoots = false;
393 static const bool StrongRootsOnly = true;
394
395 void gen_process_roots(Generation::Type type,
396 bool younger_gens_as_roots,
397 bool activate_scope,
398 SharedHeap::ScanningOption so,
399 bool only_strong_roots,
400 OopsInGenClosure* not_older_gens,
401 OopsInGenClosure* older_gens,
402 CLDClosure* cld_closure);
403
404 // Apply "root_closure" to all the weak roots of the system.
405 // These include JNI weak roots, string table,
406 // and referents of reachable weak refs.
407 void gen_process_weak_roots(OopClosure* root_closure);
408
409 // Set the saved marks of generations, if that makes sense.
410 // In particular, if any generation might iterate over the oops
411 // in other generations, it should call this method.
412 void save_marks();
413
414 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
415 // allocated since the last call to save_marks in generations at or above
416 // "start_gen". The "cur" closure is applied to references in the
417 // generation identified by "start_gen", and the "older" closure to
418 // older generations.
419 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
420 void oop_since_save_marks_iterate(Generation::Type start_gen, \
421 OopClosureType* cur, \
422 OopClosureType* older);
423
424 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
425
426 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
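// [Editorial sketch, not part of the original header] Each application of the
// macro above declares one overload per specialized closure type. Assuming
// ScanClosure is among the types covered by ALL_SINCE_SAVE_MARKS_CLOSURES,
// one expansion is equivalent to:
//
//   void oop_since_save_marks_iterate(Generation::Type start_gen,
//                                     ScanClosure* cur,
//                                     ScanClosure* older);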
427
428 // Returns "true" iff no allocations have occurred since the last
429 // call to "save_marks".
430 bool no_allocs_since_save_marks(bool include_young);
431
432 // Returns true if an incremental collection is likely to fail.
433 // We optionally consult the young gen, if asked to do so;
434 // otherwise we base our answer on whether the previous incremental
435 // collection attempt failed with no corrective action as of yet.
436 bool incremental_collection_will_fail(bool consult_young) {
437 // The first disjunct remembers if an incremental collection failed, even
438 // when we thought (second disjunct) that it would not.
439 return incremental_collection_failed() ||
440 (consult_young && !_young_gen->collection_attempt_is_safe());
441 }
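// [Editorial sketch, not part of the original header] A caller deciding
// between a minor and a full collection might consult this predicate roughly
// as follows; the surrounding policy logic is hypothetical.
//
//   GenCollectedHeap* gch = GenCollectedHeap::heap();
//   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
//     // a young-only collection is unlikely to succeed;
//     // fall back to a full collection of both generations
//   }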
442
443 // If a generation bails out of an incremental collection,
444 // it sets this flag.
445 bool incremental_collection_failed() const {
446 return _incremental_collection_failed;
447 }
448 void set_incremental_collection_failed() {
449 _incremental_collection_failed = true;
450 }
451 void clear_incremental_collection_failed() {
452 _incremental_collection_failed = false;
453 }
454
455 // Promotion of obj into gen failed. Try to promote obj to higher
456 // gens in ascending order; return the new location of obj if successful.
457 // Otherwise, try expand-and-allocate for obj in both the young and old
458 // generation; return the new location of obj if successful. Otherwise, return NULL.
459 oop handle_failed_promotion(Generation* old_gen,
460 oop obj,
461 size_t obj_size);
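// [Editorial sketch, not part of the original header] A promotion path might
// fall back to this helper after the target generation's own allocation
// attempt fails; the variable names below are hypothetical.
//
//   if (promoted_addr == NULL) {
//     return GenCollectedHeap::heap()->handle_failed_promotion(target_gen, obj, obj_size);
//   }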
462
463 private:
464 // Accessor for memory state verification support
465 NOT_PRODUCT(
466 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
467 )
468
469 // Override
470 void check_for_non_bad_heap_word_value(HeapWord* addr,
471 size_t size) PRODUCT_RETURN;
472
473 // For use by mark-sweep. As implemented, mark-sweep-compact is global
474 // in an essential way: compaction is performed across generations, by
475 // iterating over spaces.
476 void prepare_for_compaction();
477
478 // Perform a full collection of the generations up to and including max_gen.
479 // This is the low level interface used by the public versions of
480 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
481 void collect_locked(GCCause::Cause cause, Generation::Type max_gen);
482
483 // Returns success or failure.
484 bool create_cms_collector();
485
486 // In support of ExplicitGCInvokesConcurrent functionality
487 bool should_do_concurrent_full_gc(GCCause::Cause cause);
488 void collect_mostly_concurrent(GCCause::Cause cause);
489
490 // Save the tops of the spaces in all generations
491 void record_gen_tops_before_GC() PRODUCT_RETURN;
492
493 protected:
494 virtual void gc_prologue(bool full);
495 virtual void gc_epilogue(bool full);
496 };
497
498 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP