14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
26 #define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
27
28 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
29 #include "memory/collectorPolicy.hpp"
30 #include "memory/generation.hpp"
31 #include "memory/sharedHeap.hpp"
32
33 class SubTasksDone;
34
35 // A "GenCollectedHeap" is a SharedHeap that uses generational
36 // collection. It has two generations, young and old.
37 class GenCollectedHeap : public SharedHeap {
38 friend class GenCollectorPolicy;
39 friend class Generation;
40 friend class DefNewGeneration;
41 friend class TenuredGeneration;
42 friend class ConcurrentMarkSweepGeneration;
43 friend class CMSCollector;
44 friend class GenMarkSweep;
45 friend class VM_GenCollectForAllocation;
46 friend class VM_GenCollectFull;
47 friend class VM_GenCollectFullConcurrent;
48 friend class VM_GC_HeapInspection;
49 friend class VM_HeapDumper;
50 friend class HeapInspection;
51 friend class GCCauseSetter;
52 friend class VMStructs;
53 public:
76 // Indicates that the most recent previous incremental collection failed.
77 // The flag is cleared when an action is taken that might clear the
78 // condition that caused that incremental collection to fail.
79 bool _incremental_collection_failed;
80
81 // In support of ExplicitGCInvokesConcurrent functionality
82 unsigned int _full_collections_completed;
83
84 // Data structure for claiming the (potentially) parallel tasks in
85 // (gen-specific) roots processing.
86 SubTasksDone* _process_strong_tasks;
87
88 // Collects the given generation.
89 void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
90 bool run_verification, bool clear_soft_refs,
91 bool restore_marks_for_biased_locking);
92
93 // In block contents verification, the number of header words to skip
94 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
95
96 protected:
97 // Helper functions for allocation
98 HeapWord* attempt_allocation(size_t size,
99 bool is_tlab,
100 bool first_only);
101
102 // Helper function for two callbacks below.
103 // Considers collection of the first max_level+1 generations.
104 void do_collection(bool full,
105 bool clear_all_soft_refs,
106 size_t size,
107 bool is_tlab,
108 int max_level);
109
110 // Callback from VM_GenCollectForAllocation operation.
111 // This function does everything necessary/possible to satisfy an
112 // allocation request that failed in the youngest generation that should
113 // have handled it (including collection, expansion, etc.)
114 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
115
116 // Callback from VM_GenCollectFull operation.
117 // Perform a full collection of the first max_level+1 generations.
118 virtual void do_full_collection(bool clear_all_soft_refs);
119 void do_full_collection(bool clear_all_soft_refs, int max_level);
120
121 // Does the "cause" of GC indicate that
122 // we absolutely __must__ clear soft refs?
123 bool must_clear_all_soft_refs();
124
125 public:
126 GenCollectedHeap(GenCollectorPolicy *policy);
127
128 GCStats* gc_stats(int level) const;
129
130 // Returns JNI_OK on success
131 virtual jint initialize();
132
133 // Reserve aligned space for the heap as needed by the contained generations.
134 char* allocate(size_t alignment, ReservedSpace* heap_rs);
135
136 // Does operations required after initialization has been done.
137 void post_initialize();
138
139 // Initialize ("weak") refs processing support
140 virtual void ref_processing_init();
141
// Identifies the concrete kind of this heap to CollectedHeap clients.
142 virtual Name kind() const {
143 return CollectedHeap::GenCollectedHeap;
144 }
145
// Accessors for the two generations this heap manages.
146 Generation* young_gen() const { return _young_gen; }
147 Generation* old_gen() const { return _old_gen; }
206 return is_in_reserved(p);
207 } else {
208 return is_in(p);
209 }
210 }
211
212 // Returns true if the reference is to an object in the reserved space
213 // for the young generation.
214 // Assumes the young gen address range is less than that of the old gen.
215 bool is_in_young(oop p);
216
217 #ifdef ASSERT
218 bool is_in_partial_collection(const void* p);
219 #endif
220
// An address is considered scavengable iff it lies in the young
// generation (see is_in_young above).
221 virtual bool is_scavengable(const void* addr) {
222 return is_in_young((oop)addr);
223 }
224
225 // Iteration functions.
226 void oop_iterate(ExtendedOopClosure* cl);
227 void object_iterate(ObjectClosure* cl);
228 void safe_object_iterate(ObjectClosure* cl);
229 Space* space_containing(const void* addr) const;
230
231 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
232 // each address in the (reserved) heap is a member of exactly
233 // one block. The defining characteristic of a block is that it is
234 // possible to find its size, and thus to progress forward to the next
235 // block. (Blocks may be of different sizes.) Thus, blocks may
236 // represent Java objects, or they might be free blocks in a
237 // free-list-based heap (or subheap), as long as the two kinds are
238 // distinguishable and the size of each is determinable.
239
240 // Returns the address of the start of the "block" that contains the
241 // address "addr". We say "blocks" instead of "object" since some heaps
242 // may not pack objects densely; a chunk may either be an object or a
243 // non-object.
244 virtual HeapWord* block_start(const void* addr) const;
245
314 }
315
316 // Update above counter, as appropriate, at the end of a stop-world GC cycle
317 unsigned int update_full_collections_completed();
318 // Update above counter, as appropriate, at the end of a concurrent GC cycle
319 unsigned int update_full_collections_completed(unsigned int count);
320
321 // Update "time of last gc" for all generations to "now".
322 void update_time_of_last_gc(jlong now) {
323 _young_gen->update_time_of_last_gc(now);
324 _old_gen->update_time_of_last_gc(now);
325 }
326
327 // Update the gc statistics for each generation.
328 // "level" is the level of the latest collection.
329 void update_gc_stats(int current_level, bool full) {
330 _young_gen->update_gc_stats(current_level, full);
331 _old_gen->update_gc_stats(current_level, full);
332 }
333
334 // Override.
// Returns true iff no GC is currently in progress (inverse of is_gc_active()).
335 bool no_gc_in_progress() { return !is_gc_active(); }
336
337 // Override.
338 void prepare_for_verify();
339
340 // Override.
341 void verify(bool silent, VerifyOption option);
342
343 // Override.
344 virtual void print_on(outputStream* st) const;
345 virtual void print_gc_threads_on(outputStream* st) const;
346 virtual void gc_threads_do(ThreadClosure* tc) const;
347 virtual void print_tracing_info() const;
348 virtual void print_on_error(outputStream* st) const;
349
350 // PrintGC, PrintGCDetails support
351 void print_heap_change(size_t prev_used) const;
352
353 // The functions below are helper functions that a subclass of
354 // "CollectedHeap" can use in the implementation of its virtual
355 // functions.
356
// Client closure for generation_iterate(); subclasses implement
// do_generation to visit each generation in turn.
357 class GenClosure : public StackObj {
358 public:
359 virtual void do_generation(Generation* gen) = 0;
360 };
361
362 // Apply "cl.do_generation" to all generations in the heap
363 // "old_to_young" determines the order of iteration.
364 void generation_iterate(GenClosure* cl, bool old_to_young);
365
366 void space_iterate(SpaceClosure* cl);
367
368 // Return "true" if all generations have reached the
369 // maximal committed limit that they can reach, without a garbage
370 // collection.
371 virtual bool is_maximal_no_gc() const;
372
// Number of generations in this heap; asserts agreement with the
// collector policy's notion of the generation count.
373 int n_gens() const {
374 assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
375 return _n_gens;
376 }
377
378 // This function returns the "GenRemSet" object that allows us to scan
379 // generations in a fully generational heap.
380 GenRemSet* rem_set() { return _rem_set; }
381
382 // Convenience function to be used in situations where the heap type can be
383 // asserted to be this type.
384 static GenCollectedHeap* heap();
385
386 void set_par_threads(uint t);
387 void set_n_termination(uint t);
514 // in an essential way: compaction is performed across generations, by
515 // iterating over spaces.
516 void prepare_for_compaction();
517
518 // Perform a full collection of the first max_level+1 generations.
519 // This is the low level interface used by the public versions of
520 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
521 void collect_locked(GCCause::Cause cause, int max_level);
522
523 // Returns success or failure.
524 bool create_cms_collector();
525
526 // In support of ExplicitGCInvokesConcurrent functionality
527 bool should_do_concurrent_full_gc(GCCause::Cause cause);
528 void collect_mostly_concurrent(GCCause::Cause cause);
529
530 // Save the tops of the spaces in all generations
531 void record_gen_tops_before_GC() PRODUCT_RETURN;
532
533 protected:
534 virtual void gc_prologue(bool full);
535 virtual void gc_epilogue(bool full);
536 };
537
538 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
26 #define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
27
28 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
29 #include "memory/collectorPolicy.hpp"
30 #include "memory/generation.hpp"
31 #include "memory/sharedHeap.hpp"
32
33 class SubTasksDone;
34 class FlexibleWorkGang;
35
36 // A "GenCollectedHeap" is a SharedHeap that uses generational
37 // collection. It has two generations, young and old.
38 class GenCollectedHeap : public SharedHeap {
39 friend class GenCollectorPolicy;
40 friend class Generation;
41 friend class DefNewGeneration;
42 friend class TenuredGeneration;
43 friend class ConcurrentMarkSweepGeneration;
44 friend class CMSCollector;
45 friend class GenMarkSweep;
46 friend class VM_GenCollectForAllocation;
47 friend class VM_GenCollectFull;
48 friend class VM_GenCollectFullConcurrent;
49 friend class VM_GC_HeapInspection;
50 friend class VM_HeapDumper;
51 friend class HeapInspection;
52 friend class GCCauseSetter;
53 friend class VMStructs;
54 public:
77 // Indicates that the most recent previous incremental collection failed.
78 // The flag is cleared when an action is taken that might clear the
79 // condition that caused that incremental collection to fail.
80 bool _incremental_collection_failed;
81
82 // In support of ExplicitGCInvokesConcurrent functionality
83 unsigned int _full_collections_completed;
84
85 // Data structure for claiming the (potentially) parallel tasks in
86 // (gen-specific) roots processing.
87 SubTasksDone* _process_strong_tasks;
88
89 // Collects the given generation.
90 void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
91 bool run_verification, bool clear_soft_refs,
92 bool restore_marks_for_biased_locking);
93
94 // In block contents verification, the number of header words to skip
95 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
96
97 FlexibleWorkGang* _workers;
98
99 protected:
100 // Helper functions for allocation
101 HeapWord* attempt_allocation(size_t size,
102 bool is_tlab,
103 bool first_only);
104
105 // Helper function for two callbacks below.
106 // Considers collection of the first max_level+1 generations.
107 void do_collection(bool full,
108 bool clear_all_soft_refs,
109 size_t size,
110 bool is_tlab,
111 int max_level);
112
113 // Callback from VM_GenCollectForAllocation operation.
114 // This function does everything necessary/possible to satisfy an
115 // allocation request that failed in the youngest generation that should
116 // have handled it (including collection, expansion, etc.)
117 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
118
119 // Callback from VM_GenCollectFull operation.
120 // Perform a full collection of the first max_level+1 generations.
121 virtual void do_full_collection(bool clear_all_soft_refs);
122 void do_full_collection(bool clear_all_soft_refs, int max_level);
123
124 // Does the "cause" of GC indicate that
125 // we absolutely __must__ clear soft refs?
126 bool must_clear_all_soft_refs();
127
128 public:
129 GenCollectedHeap(GenCollectorPolicy *policy);
130
// Accessor for the heap's parallel work gang (_workers).
131 FlexibleWorkGang* workers() const { return _workers; }
132
133 GCStats* gc_stats(int level) const;
134
135 // Returns JNI_OK on success
136 virtual jint initialize();
137
138 // Reserve aligned space for the heap as needed by the contained generations.
139 char* allocate(size_t alignment, ReservedSpace* heap_rs);
140
141 // Does operations required after initialization has been done.
142 void post_initialize();
143
144 // Initialize ("weak") refs processing support
145 virtual void ref_processing_init();
146
// Identifies the concrete kind of this heap to CollectedHeap clients.
147 virtual Name kind() const {
148 return CollectedHeap::GenCollectedHeap;
149 }
150
// Accessors for the two generations this heap manages.
151 Generation* young_gen() const { return _young_gen; }
152 Generation* old_gen() const { return _old_gen; }
211 return is_in_reserved(p);
212 } else {
213 return is_in(p);
214 }
215 }
216
217 // Returns true if the reference is to an object in the reserved space
218 // for the young generation.
219 // Assumes the young gen address range is less than that of the old gen.
220 bool is_in_young(oop p);
221
222 #ifdef ASSERT
223 bool is_in_partial_collection(const void* p);
224 #endif
225
// An address is considered scavengable iff it lies in the young
// generation (see is_in_young above).
226 virtual bool is_scavengable(const void* addr) {
227 return is_in_young((oop)addr);
228 }
229
230 // Iteration functions.
231 void oop_iterate_no_header(OopClosure* cl);
232 void oop_iterate(ExtendedOopClosure* cl);
233 void object_iterate(ObjectClosure* cl);
234 void safe_object_iterate(ObjectClosure* cl);
235 Space* space_containing(const void* addr) const;
236
237 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
238 // each address in the (reserved) heap is a member of exactly
239 // one block. The defining characteristic of a block is that it is
240 // possible to find its size, and thus to progress forward to the next
241 // block. (Blocks may be of different sizes.) Thus, blocks may
242 // represent Java objects, or they might be free blocks in a
243 // free-list-based heap (or subheap), as long as the two kinds are
244 // distinguishable and the size of each is determinable.
245
246 // Returns the address of the start of the "block" that contains the
247 // address "addr". We say "blocks" instead of "object" since some heaps
248 // may not pack objects densely; a chunk may either be an object or a
249 // non-object.
250 virtual HeapWord* block_start(const void* addr) const;
251
320 }
321
322 // Update above counter, as appropriate, at the end of a stop-world GC cycle
323 unsigned int update_full_collections_completed();
324 // Update above counter, as appropriate, at the end of a concurrent GC cycle
325 unsigned int update_full_collections_completed(unsigned int count);
326
327 // Update "time of last gc" for all generations to "now".
328 void update_time_of_last_gc(jlong now) {
329 _young_gen->update_time_of_last_gc(now);
330 _old_gen->update_time_of_last_gc(now);
331 }
332
333 // Update the gc statistics for each generation.
334 // "level" is the level of the latest collection.
335 void update_gc_stats(int current_level, bool full) {
336 _young_gen->update_gc_stats(current_level, full);
337 _old_gen->update_gc_stats(current_level, full);
338 }
339
// Returns true iff no GC is currently in progress (inverse of is_gc_active()).
340 bool no_gc_in_progress() { return !is_gc_active(); }
341
342 // Override.
343 void prepare_for_verify();
344
345 // Override.
346 void verify(bool silent, VerifyOption option);
347
348 // Override.
349 virtual void print_on(outputStream* st) const;
350 virtual void print_gc_threads_on(outputStream* st) const;
351 virtual void gc_threads_do(ThreadClosure* tc) const;
352 virtual void print_tracing_info() const;
353 virtual void print_on_error(outputStream* st) const;
354
355 // PrintGC, PrintGCDetails support
356 void print_heap_change(size_t prev_used) const;
357
358 // The functions below are helper functions that a subclass of
359 // "CollectedHeap" can use in the implementation of its virtual
360 // functions.
361
// Client closure for generation_iterate(); subclasses implement
// do_generation to visit each generation in turn.
362 class GenClosure : public StackObj {
363 public:
364 virtual void do_generation(Generation* gen) = 0;
365 };
366
367 // Apply "cl.do_generation" to all generations in the heap
368 // "old_to_young" determines the order of iteration.
369 void generation_iterate(GenClosure* cl, bool old_to_young);
370
371 // Return "true" if all generations have reached the
372 // maximal committed limit that they can reach, without a garbage
373 // collection.
374 virtual bool is_maximal_no_gc() const;
375
// Number of generations in this heap; asserts agreement with the
// collector policy's notion of the generation count.
376 int n_gens() const {
377 assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
378 return _n_gens;
379 }
380
381 // This function returns the "GenRemSet" object that allows us to scan
382 // generations in a fully generational heap.
383 GenRemSet* rem_set() { return _rem_set; }
384
385 // Convenience function to be used in situations where the heap type can be
386 // asserted to be this type.
387 static GenCollectedHeap* heap();
388
389 void set_par_threads(uint t);
390 void set_n_termination(uint t);
517 // in an essential way: compaction is performed across generations, by
518 // iterating over spaces.
519 void prepare_for_compaction();
520
521 // Perform a full collection of the first max_level+1 generations.
522 // This is the low level interface used by the public versions of
523 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
524 void collect_locked(GCCause::Cause cause, int max_level);
525
526 // Returns success or failure.
527 bool create_cms_collector();
528
529 // In support of ExplicitGCInvokesConcurrent functionality
530 bool should_do_concurrent_full_gc(GCCause::Cause cause);
531 void collect_mostly_concurrent(GCCause::Cause cause);
532
533 // Save the tops of the spaces in all generations
534 void record_gen_tops_before_GC() PRODUCT_RETURN;
535
536 protected:
537 void gc_prologue(bool full);
538 void gc_epilogue(bool full);
539 };
540
541 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
|