// Verifies that the "size" heap words starting at "addr" do not contain the
// debug bad-heap-word fill pattern. PRODUCT_RETURN makes this an empty no-op
// in product builds; the check only exists in debug builds.
virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
  PRODUCT_RETURN;
// Debug-only assertion that the calling thread is currently in a state in
// which heap allocation is permitted.
debug_only(static void check_for_valid_allocation_state();)
203
public:
// Tags identifying the concrete collector backing this heap; returned by
// kind() so shared code can dispatch on the collector in use.
enum Name {
  None,
  Serial,
  Parallel,
  CMS,
  G1,
  Shenandoah
};
213
214 static inline size_t filler_array_max_size() {
215 return _filler_array_max_size;
216 }
217
// The Name tag of the concrete heap implementation (see enum Name above).
virtual Name kind() const = 0;

// Hook run after a new TLAB has been obtained. Returns the (possibly
// adjusted) object start address — the exact contract is defined by the
// overriding heap; confirm in the implementation before relying on it.
virtual HeapWord* tlab_post_allocation_setup(HeapWord* obj);

// Human-readable name of the heap/collector, e.g. for logging and errors.
virtual const char* name() const = 0;

/**
 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
 * and JNI_OK on success.
 */
virtual jint initialize() = 0;
229
// In many heaps, there will be a need to perform some initialization activities
// after the Universe is fully formed, but before general heap allocation is allowed.
// This is the correct place to place such initialization methods.
virtual void post_initialize();

// Stop any ongoing concurrent work and prepare for exit. Default is a no-op.
virtual void stop() {}

// Stop and resume concurrent GC threads interfering with safepoint operations.
// Defaults are no-ops for heaps that have no such threads.
virtual void safepoint_synchronize_begin() {}
virtual void safepoint_synchronize_end() {}
241
// Utilities for turning raw memory into filler objects.
//
// min_fill_size() is the smallest region that can be filled.
// fill_with_objects() can fill arbitrary-sized regions of the heap using
// multiple objects. fill_with_object() is for regions known to be smaller
// than the largest array of integers; it uses a single object to fill the
// region and has slightly less overhead.
//
// Smallest fillable region: one object header, rounded up to the minimum
// object alignment.
static size_t min_fill_size() {
  return size_t(align_object_size(oopDesc::header_size()));
}
349
// Fill "words" words at "start" with one or more filler objects; handles
// arbitrarily large regions by splitting them across multiple objects.
static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

// Single-object fill; the region must fit in the largest integer array
// (see the block comment above). "zap" presumably controls writing a debug
// fill pattern — confirm in the implementation.
static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
// Convenience overload: fill an entire MemRegion.
static void fill_with_object(MemRegion region, bool zap = true) {
  fill_with_object(region.start(), region.word_size(), zap);
}
// Convenience overload: fill the half-open range [start, end).
static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
  fill_with_object(start, pointer_delta(end, start), zap);
}
359
// Return the address "addr" aligned by "alignment_in_bytes" if such
// an address is below "end". Return NULL otherwise.
// Used when bump-pointer allocation must produce an aligned result or
// give up rather than overrun the current region.
inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                 HeapWord* end,
                                                 unsigned short alignment_in_bytes);
365
// Some heaps may offer a contiguous region for shared non-blocking
// allocation, via inlined code (by exporting the address of the top and
// end fields defining the extent of the contiguous allocation region.)

// This function returns "true" iff the heap supports this kind of
// allocation. (Default is "no".) Heaps that export top/end addresses
// override this to return true.
virtual bool supports_inline_contig_alloc() const {
  return false;
}
375 // These functions return the addresses of the fields that define the
376 // boundaries of the contiguous allocation area. (These fields should be
377 // physically near to one another.)
378 virtual HeapWord* volatile* top_addr() const {
|
// Verifies that the "size" heap words starting at "addr" do not contain the
// debug bad-heap-word fill pattern. PRODUCT_RETURN makes this an empty no-op
// in product builds; the check only exists in debug builds.
virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
  PRODUCT_RETURN;
// Debug-only assertion that the calling thread is currently in a state in
// which heap allocation is permitted.
debug_only(static void check_for_valid_allocation_state();)
203
public:
// Tags identifying the concrete collector backing this heap; returned by
// kind() so shared code can dispatch on the collector in use.
enum Name {
  None,
  Serial,
  Parallel,
  CMS,
  G1,
  Shenandoah
};
213
214 static inline size_t filler_array_max_size() {
215 return _filler_array_max_size;
216 }
217
// The Name tag of the concrete heap implementation (see enum Name above).
virtual Name kind() const = 0;

// Human-readable name of the heap/collector, e.g. for logging and errors.
virtual const char* name() const = 0;

/**
 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
 * and JNI_OK on success.
 */
virtual jint initialize() = 0;
227
// In many heaps, there will be a need to perform some initialization activities
// after the Universe is fully formed, but before general heap allocation is allowed.
// This is the correct place to place such initialization methods.
virtual void post_initialize();

// Stop any ongoing concurrent work and prepare for exit. Default is a no-op.
virtual void stop() {}

// Stop and resume concurrent GC threads interfering with safepoint operations.
// Defaults are no-ops for heaps that have no such threads.
virtual void safepoint_synchronize_begin() {}
virtual void safepoint_synchronize_end() {}
239
// Utilities for turning raw memory into filler objects.
//
// min_fill_size() is the smallest region that can be filled.
// fill_with_objects() can fill arbitrary-sized regions of the heap using
// multiple objects. fill_with_object() is for regions known to be smaller
// than the largest array of integers; it uses a single object to fill the
// region and has slightly less overhead.
//
// Smallest fillable region: one object header, rounded up to the minimum
// object alignment.
static size_t min_fill_size() {
  return size_t(align_object_size(oopDesc::header_size()));
}
347
// Fill "words" words at "start" with one or more filler objects; handles
// arbitrarily large regions by splitting them across multiple objects.
static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

// Single-object fill; the region must fit in the largest integer array
// (see the block comment above). "zap" presumably controls writing a debug
// fill pattern — confirm in the implementation.
static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
// Convenience overload: fill an entire MemRegion.
static void fill_with_object(MemRegion region, bool zap = true) {
  fill_with_object(region.start(), region.word_size(), zap);
}
// Convenience overload: fill the half-open range [start, end).
static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
  fill_with_object(start, pointer_delta(end, start), zap);
}

// Per-heap hook to fill [start, end) with a dummy (filler) object; unlike
// the static helpers above, collectors may override this — exact semantics
// are defined by the overriding heap.
virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
359
// Return the address "addr" aligned by "alignment_in_bytes" if such
// an address is below "end". Return NULL otherwise.
// Used when bump-pointer allocation must produce an aligned result or
// give up rather than overrun the current region.
inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                 HeapWord* end,
                                                 unsigned short alignment_in_bytes);
365
// Some heaps may offer a contiguous region for shared non-blocking
// allocation, via inlined code (by exporting the address of the top and
// end fields defining the extent of the contiguous allocation region.)

// This function returns "true" iff the heap supports this kind of
// allocation. (Default is "no".) Heaps that export top/end addresses
// override this to return true.
virtual bool supports_inline_contig_alloc() const {
  return false;
}
375 // These functions return the addresses of the fields that define the
376 // boundaries of the contiguous allocation area. (These fields should be
377 // physically near to one another.)
378 virtual HeapWord* volatile* top_addr() const {
|