188 GenCollectedHeap,
189 ParallelScavengeHeap,
190 G1CollectedHeap
191 };
192
// Returns the cached upper bound on filler-array size
// (_filler_array_max_size; presumably computed during heap
// initialization — TODO confirm where it is assigned).
193 static inline size_t filler_array_max_size() {
194 return _filler_array_max_size;
195 }
196
197 virtual Name kind() const = 0;
198
199 /**
200 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
201 * and JNI_OK on success.
202 */
203 virtual jint initialize() = 0;
204
205 // In many heaps, there will be a need to perform some initialization activities
206 // after the Universe is fully formed, but before general heap allocation is allowed.
207 // This is the correct place to place such initialization methods.
208 virtual void post_initialize() = 0;
209
210 // Stop any ongoing concurrent work and prepare for exit.
211 virtual void stop() {}
212
// Records the heap's reserved address range; declaration only —
// implementation lives elsewhere.
213 void initialize_reserved_region(HeapWord *start, HeapWord *end);
// The full reserved region of the heap (returns the _reserved member).
214 MemRegion reserved_region() const { return _reserved; }
// Lowest address of the reserved region, as a raw address.
215 address base() const { return (address)reserved_region().start(); }
216
217 virtual size_t capacity() const = 0;
218 virtual size_t used() const = 0;
219
220 // Return "true" if the part of the heap that allocates Java
221 // objects has reached the maximal committed limit that it can
222 // reach, without a garbage collection.
223 virtual bool is_maximal_no_gc() const = 0;
224
225 // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
226 // memory that the vm could make available for storing 'normal' java objects.
227 // This is based on the reserved address space, but should not include space
228 // that the vm uses internally for bookkeeping or temporary storage
453
454 // Does this heap support heap inspection (+PrintClassHistogram?)
455 virtual bool supports_heap_inspection() const = 0;
456
457 // Perform a collection of the heap; intended for use in implementing
458 // "System.gc". This probably implies as full a collection as the
459 // "CollectedHeap" supports.
460 virtual void collect(GCCause::Cause cause) = 0;
461
462 // Perform a full collection
463 virtual void do_full_collection(bool clear_all_soft_refs) = 0;
464
465 // This interface assumes that it's being called by the
466 // vm thread. It collects the heap assuming that the
467 // heap lock is already held and that we are executing in
468 // the context of the vm thread.
469 virtual void collect_as_vm_thread(GCCause::Cause cause);
470
471 // Returns the barrier set for this heap
472 BarrierSet* barrier_set() { return _barrier_set; }
473
474 // Returns "true" iff there is a stop-world GC in progress. (I assume
475 // that it should answer "false" for the concurrent part of a concurrent
476 // collector -- dld).
477 bool is_gc_active() const { return _is_gc_active; }
478
479 // Total number of GC collections (started)
480 unsigned int total_collections() const { return _total_collections; }
481 unsigned int total_full_collections() const { return _total_full_collections;}
482
483 // Increment total number of GC collections (started)
484 // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
485 void increment_total_collections(bool full = false) {
486 _total_collections++;
// A full collection is counted in both _total_collections and
// _total_full_collections.
487 if (full) {
488 increment_total_full_collections();
489 }
490 }
491
492 void increment_total_full_collections() { _total_full_collections++; }
493
494 // Return the AdaptiveSizePolicy for the heap.
495 virtual AdaptiveSizePolicy* size_policy() = 0;
496
497 // Return the CollectorPolicy for the heap
498 virtual CollectorPolicy* collector_policy() const = 0;
499
500 void oop_iterate_no_header(OopClosure* cl);
501
502 // Iterate over all the ref-containing fields of all objects, calling
503 // "cl.do_oop" on each.
504 virtual void oop_iterate(ExtendedOopClosure* cl) = 0;
505
506 // Iterate over all objects, calling "cl.do_object" on each.
507 virtual void object_iterate(ObjectClosure* cl) = 0;
508
509 // Similar to object_iterate() except iterates only
510 // over live objects.
511 virtual void safe_object_iterate(ObjectClosure* cl) = 0;
512
513 // NOTE! There is no requirement that a collector implement these
514 // functions.
515 //
516 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
517 // each address in the (reserved) heap is a member of exactly
518 // one block. The defining characteristic of a block is that it is
519 // possible to find its size, and thus to progress forward to the next
520 // block. (Blocks may be of different sizes.) Thus, blocks may
521 // represent Java objects, or they might be free blocks in a
522 // free-list-based heap (or subheap), as long as the two kinds are
523 // distinguishable and the size of each is determinable.
524
|
188 GenCollectedHeap,
189 ParallelScavengeHeap,
190 G1CollectedHeap
191 };
192
// Returns the cached upper bound on filler-array size
// (_filler_array_max_size; presumably computed during heap
// initialization — TODO confirm where it is assigned).
193 static inline size_t filler_array_max_size() {
194 return _filler_array_max_size;
195 }
196
197 virtual Name kind() const = 0;
198
199 /**
200 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
201 * and JNI_OK on success.
202 */
203 virtual jint initialize() = 0;
204
205 // In many heaps, there will be a need to perform some initialization activities
206 // after the Universe is fully formed, but before general heap allocation is allowed.
207 // This is the correct place to place such initialization methods.
208 virtual void post_initialize();
209
210 // Stop any ongoing concurrent work and prepare for exit.
211 virtual void stop() {}
212
// Records the heap's reserved address range; declaration only —
// implementation lives elsewhere.
213 void initialize_reserved_region(HeapWord *start, HeapWord *end);
// The full reserved region of the heap (returns the _reserved member).
214 MemRegion reserved_region() const { return _reserved; }
// Lowest address of the reserved region, as a raw address.
215 address base() const { return (address)reserved_region().start(); }
216
217 virtual size_t capacity() const = 0;
218 virtual size_t used() const = 0;
219
220 // Return "true" if the part of the heap that allocates Java
221 // objects has reached the maximal committed limit that it can
222 // reach, without a garbage collection.
223 virtual bool is_maximal_no_gc() const = 0;
224
225 // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
226 // memory that the vm could make available for storing 'normal' java objects.
227 // This is based on the reserved address space, but should not include space
228 // that the vm uses internally for bookkeeping or temporary storage
453
454 // Does this heap support heap inspection (+PrintClassHistogram?)
455 virtual bool supports_heap_inspection() const = 0;
456
457 // Perform a collection of the heap; intended for use in implementing
458 // "System.gc". This probably implies as full a collection as the
459 // "CollectedHeap" supports.
460 virtual void collect(GCCause::Cause cause) = 0;
461
462 // Perform a full collection
463 virtual void do_full_collection(bool clear_all_soft_refs) = 0;
464
465 // This interface assumes that it's being called by the
466 // vm thread. It collects the heap assuming that the
467 // heap lock is already held and that we are executing in
468 // the context of the vm thread.
469 virtual void collect_as_vm_thread(GCCause::Cause cause);
470
471 // Returns the barrier set for this heap
472 BarrierSet* barrier_set() { return _barrier_set; }
473 void set_barrier_set(BarrierSet* barrier_set);
474
475 // Returns "true" iff there is a stop-world GC in progress. (I assume
476 // that it should answer "false" for the concurrent part of a concurrent
477 // collector -- dld).
478 bool is_gc_active() const { return _is_gc_active; }
479
480 // Total number of GC collections (started)
481 unsigned int total_collections() const { return _total_collections; }
482 unsigned int total_full_collections() const { return _total_full_collections;}
483
484 // Increment total number of GC collections (started)
485 // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
486 void increment_total_collections(bool full = false) {
487 _total_collections++;
// A full collection is counted in both _total_collections and
// _total_full_collections.
488 if (full) {
489 increment_total_full_collections();
490 }
491 }
492
493 void increment_total_full_collections() { _total_full_collections++; }
494
495 // Return the AdaptiveSizePolicy for the heap.
496 virtual AdaptiveSizePolicy* size_policy() = 0;
497
498 // Return the CollectorPolicy for the heap
499 virtual CollectorPolicy* collector_policy() const = 0;
500
501 // Iterate over all objects, calling "cl.do_object" on each.
502 virtual void object_iterate(ObjectClosure* cl) = 0;
503
504 // Similar to object_iterate() except iterates only
505 // over live objects.
506 virtual void safe_object_iterate(ObjectClosure* cl) = 0;
507
508 // NOTE! There is no requirement that a collector implement these
509 // functions.
510 //
511 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
512 // each address in the (reserved) heap is a member of exactly
513 // one block. The defining characteristic of a block is that it is
514 // possible to find its size, and thus to progress forward to the next
515 // block. (Blocks may be of different sizes.) Thus, blocks may
516 // represent Java objects, or they might be free blocks in a
517 // free-list-based heap (or subheap), as long as the two kinds are
518 // distinguishable and the size of each is determinable.
519
|