168 DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);) // Debug builds only.
169
170 // Fill with a single array; caller must ensure filler_array_min_size() <=
171 // words <= filler_array_max_size().
172 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
173
174 // Fill with a single object (either an int array or a java.lang.Object).
175 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
176
177 virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer); // Report heap state to the GC tracer.
178
179 // Verification functions; compiled out of product builds (PRODUCT_RETURN).
180 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
181 PRODUCT_RETURN;
182 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
183 PRODUCT_RETURN;
184 debug_only(static void check_for_valid_allocation_state();) // Debug builds only.
185
186 public:
187 enum Name { // Identifies the concrete heap implementation (see kind()).
188 Abstract,
189 SharedHeap,
190 GenCollectedHeap,
191 ParallelScavengeHeap,
192 G1CollectedHeap
193 };
194
195 static inline size_t filler_array_max_size() { // Upper bound accepted by fill_with_array().
196 return _filler_array_max_size;
197 }
198
199 virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; } // Overridden by concrete heaps.
200
201 /**
202 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
203 * and JNI_OK on success.
204 */
205 virtual jint initialize() = 0;
206
207 // In many heaps, there will be a need to perform some initialization activities
208 // after the Universe is fully formed, but before general heap allocation is allowed.
209 // This is the correct place to place such initialization methods.
210 virtual void post_initialize() = 0;
211
212 // Stop any ongoing concurrent work and prepare for exit.
213 virtual void stop() {}
214
215 void initialize_reserved_region(HeapWord *start, HeapWord *end);
216 MemRegion reserved_region() const { return _reserved; } // Heap's reserved range -- presumably set by initialize_reserved_region(); verify.
217 address base() const { return (address)reserved_region().start(); } // Lowest address of the reserved region.
218
219 virtual size_t capacity() const = 0;
|
168 DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);) // Debug builds only.
169
170 // Fill with a single array; caller must ensure filler_array_min_size() <=
171 // words <= filler_array_max_size().
172 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
173
174 // Fill with a single object (either an int array or a java.lang.Object).
175 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
176
177 virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer); // Report heap state to the GC tracer.
178
179 // Verification functions; compiled out of product builds (PRODUCT_RETURN).
180 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
181 PRODUCT_RETURN;
182 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
183 PRODUCT_RETURN;
184 debug_only(static void check_for_valid_allocation_state();) // Debug builds only.
185
186 public:
187 enum Name { // Identifies the concrete heap implementation (see kind()).
188 GenCollectedHeap,
189 ParallelScavengeHeap,
190 G1CollectedHeap
191 };
192
193 static inline size_t filler_array_max_size() { // Upper bound accepted by fill_with_array().
194 return _filler_array_max_size;
195 }
196
197 virtual Name kind() const = 0; // Each concrete heap reports its Name.
198
199 /**
200 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
201 * and JNI_OK on success.
202 */
203 virtual jint initialize() = 0;
204
205 // In many heaps, there will be a need to perform some initialization activities
206 // after the Universe is fully formed, but before general heap allocation is allowed.
207 // This is the correct place to place such initialization methods.
208 virtual void post_initialize() = 0;
209
210 // Stop any ongoing concurrent work and prepare for exit.
211 virtual void stop() {}
212
213 void initialize_reserved_region(HeapWord *start, HeapWord *end);
214 MemRegion reserved_region() const { return _reserved; } // Heap's reserved range -- presumably set by initialize_reserved_region(); verify.
215 address base() const { return (address)reserved_region().start(); } // Lowest address of the reserved region.
216
217 virtual size_t capacity() const = 0;
|