70 // The generational collector policy.
71 GenCollectorPolicy* _gen_policy;
72
73 // Indicates that the most recent previous incremental collection failed.
74 // The flag is cleared when an action is taken that might clear the
75 // condition that caused that incremental collection to fail.
76 bool _incremental_collection_failed;
77
78 // In support of ExplicitGCInvokesConcurrent functionality
79 unsigned int _full_collections_completed;
80
81 // Data structure for claiming the (potentially) parallel tasks in
82 // (gen-specific) roots processing.
83 SubTasksDone* _process_strong_tasks;
84
85 // Collects the given generation.
86 void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
87 bool run_verification, bool clear_soft_refs,
88 bool restore_marks_for_biased_locking);
89
90 // In block contents verification, the number of header words to skip
91 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
92
93 WorkGang* _workers;
94
95 protected:
96 // Helper functions for allocation
97 HeapWord* attempt_allocation(size_t size,
98 bool is_tlab,
99 bool first_only);
100
101 // Helper function for two callbacks below.
102 // Considers collection of the first max_level+1 generations.
103 void do_collection(bool full,
104 bool clear_all_soft_refs,
105 size_t size,
106 bool is_tlab,
107 GenerationType max_generation);
108
109 // Callback from VM_GenCollectForAllocation operation.
173
174 size_t capacity() const;
175 size_t used() const;
176
177 // Save the "used_region" for both generations.
178 void save_used_regions();
179
180 size_t max_capacity() const;
181
182 HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
183
184 // We may support a shared contiguous allocation area, if the youngest
185 // generation does.
186 bool supports_inline_contig_alloc() const;
187 HeapWord** top_addr() const;
188 HeapWord** end_addr() const;
189
190 // Perform a full collection of the heap; intended for use in implementing
191 // "System.gc". This implies as full a collection as the CollectedHeap
192 // supports. Caller does not hold the Heap_lock on entry.
193 void collect(GCCause::Cause cause);
194
195 // The same as above but assume that the caller holds the Heap_lock.
196 void collect_locked(GCCause::Cause cause);
197
198 // Perform a full collection of generations up to and including max_generation.
199 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
200 void collect(GCCause::Cause cause, GenerationType max_generation);
201
202 // Returns "TRUE" iff "p" points into the committed areas of the heap.
203 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
204 // be expensive to compute in general, so, to prevent
205 // their inadvertent use in product jvm's, we restrict their use to
206 // assertion checking or verification only.
207 bool is_in(const void* p) const;
208
209 // override
210 bool is_in_closed_subset(const void* p) const {
211 if (UseConcMarkSweepGC) {
212 return is_in_reserved(p);
213 } else {
480 private:
481 // Accessor for memory state verification support
// NOTE(review): NOT_PRODUCT(...) makes this accessor exist only in
// non-product (debug/fastdebug) builds, matching the NOT_PRODUCT-guarded
// declaration of _skip_header_HeapWords earlier in this class; it returns
// the number of header words skipped during block-contents verification.
482 NOT_PRODUCT(
483 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
484 )
485
486 // Override
487 void check_for_non_bad_heap_word_value(HeapWord* addr,
488 size_t size) PRODUCT_RETURN;
489
490 // For use by mark-sweep. As implemented, mark-sweep-compact is global
491 // in an essential way: compaction is performed across generations, by
492 // iterating over spaces.
493 void prepare_for_compaction();
494
495 // Perform a full collection of the generations up to and including max_generation.
496 // This is the low level interface used by the public versions of
497 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
498 void collect_locked(GCCause::Cause cause, GenerationType max_generation);
499
500 // Returns success or failure.
501 bool create_cms_collector();
502
503 // In support of ExplicitGCInvokesConcurrent functionality
504 bool should_do_concurrent_full_gc(GCCause::Cause cause);
505 void collect_mostly_concurrent(GCCause::Cause cause);
506
507 // Save the tops of the spaces in all generations
508 void record_gen_tops_before_GC() PRODUCT_RETURN;
509
510 protected:
511 void gc_prologue(bool full);
512 void gc_epilogue(bool full);
513
514 public:
515 void stop();
516 };
517
518 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
|
70 // The generational collector policy.
71 GenCollectorPolicy* _gen_policy;
72
73 // Indicates that the most recent previous incremental collection failed.
74 // The flag is cleared when an action is taken that might clear the
75 // condition that caused that incremental collection to fail.
76 bool _incremental_collection_failed;
77
78 // In support of ExplicitGCInvokesConcurrent functionality
79 unsigned int _full_collections_completed;
80
81 // Data structure for claiming the (potentially) parallel tasks in
82 // (gen-specific) roots processing.
83 SubTasksDone* _process_strong_tasks;
84
85 // Collects the given generation.
86 void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
87 bool run_verification, bool clear_soft_refs,
88 bool restore_marks_for_biased_locking);
89
90 protected:
91 // In block contents verification, the number of header words to skip
92 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
93
94 WorkGang* _workers;
95
96 protected:
97 // Helper functions for allocation
98 HeapWord* attempt_allocation(size_t size,
99 bool is_tlab,
100 bool first_only);
101
102 // Helper function for two callbacks below.
103 // Considers collection of the first max_level+1 generations.
104 void do_collection(bool full,
105 bool clear_all_soft_refs,
106 size_t size,
107 bool is_tlab,
108 GenerationType max_generation);
109
110 // Callback from VM_GenCollectForAllocation operation.
174
175 size_t capacity() const;
176 size_t used() const;
177
178 // Save the "used_region" for both generations.
179 void save_used_regions();
180
181 size_t max_capacity() const;
182
183 HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
184
185 // We may support a shared contiguous allocation area, if the youngest
186 // generation does.
187 bool supports_inline_contig_alloc() const;
188 HeapWord** top_addr() const;
189 HeapWord** end_addr() const;
190
191 // Perform a full collection of the heap; intended for use in implementing
192 // "System.gc". This implies as full a collection as the CollectedHeap
193 // supports. Caller does not hold the Heap_lock on entry.
194 virtual void collect(GCCause::Cause cause);
195
196 // The same as above but assume that the caller holds the Heap_lock.
197 void collect_locked(GCCause::Cause cause);
198
199 // Perform a full collection of generations up to and including max_generation.
200 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
201 void collect(GCCause::Cause cause, GenerationType max_generation);
202
203 // Returns "TRUE" iff "p" points into the committed areas of the heap.
204 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
205 // be expensive to compute in general, so, to prevent
206 // their inadvertent use in product jvm's, we restrict their use to
207 // assertion checking or verification only.
208 bool is_in(const void* p) const;
209
210 // override
211 bool is_in_closed_subset(const void* p) const {
212 if (UseConcMarkSweepGC) {
213 return is_in_reserved(p);
214 } else {
481 private:
482 // Accessor for memory state verification support
// NOTE(review): NOT_PRODUCT(...) makes this accessor exist only in
// non-product (debug/fastdebug) builds, matching the NOT_PRODUCT-guarded
// declaration of _skip_header_HeapWords earlier in this class; it returns
// the number of header words skipped during block-contents verification.
483 NOT_PRODUCT(
484 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
485 )
486
487 // Override
488 void check_for_non_bad_heap_word_value(HeapWord* addr,
489 size_t size) PRODUCT_RETURN;
490
491 // For use by mark-sweep. As implemented, mark-sweep-compact is global
492 // in an essential way: compaction is performed across generations, by
493 // iterating over spaces.
494 void prepare_for_compaction();
495
496 // Perform a full collection of the generations up to and including max_generation.
497 // This is the low level interface used by the public versions of
498 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
499 void collect_locked(GCCause::Cause cause, GenerationType max_generation);
500
501 // Save the tops of the spaces in all generations
502 void record_gen_tops_before_GC() PRODUCT_RETURN;
503
504 protected:
505 void gc_prologue(bool full);
506 void gc_epilogue(bool full);
507
508 };
509
510 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
|