41 class AdaptiveSizePolicy;
42 class BarrierSet;
43 class CollectorPolicy;
44 class GCHeapSummary;
45 class GCTimer;
46 class GCTracer;
47 class MetaspaceSummary;
48 class Thread;
49 class ThreadClosure;
50 class VirtualSpaceSummary;
51 class nmethod;
52
53 class GCMessage : public FormatBuffer<1024> {
54 public:
55 bool is_before;
56
57 public:
58 GCMessage() {}
59 };
60
61 class GCHeapLog : public EventLogBase<GCMessage> {
62 private:
63 void log_heap(bool before);
64
65 public:
66 GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
67
68 void log_heap_before() {
69 log_heap(true);
70 }
71 void log_heap_after() {
72 log_heap(false);
73 }
74 };
75
76 //
77 // CollectedHeap
78 // GenCollectedHeap
79 // G1CollectedHeap
80 // ParallelScavengeHeap
81 //
82 class CollectedHeap : public CHeapObj<mtInternal> {
83 friend class VMStructs;
84 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
85
86 private:
87 #ifdef ASSERT
88 static int _fire_out_of_memory_count;
89 #endif
90
91 GCHeapLog* _gc_heap_log;
92
178 // Verification functions
179 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
180 PRODUCT_RETURN;
181 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
182 PRODUCT_RETURN;
183 debug_only(static void check_for_valid_allocation_state();)
184
185 public:
186 enum Name {
187 GenCollectedHeap,
188 ParallelScavengeHeap,
189 G1CollectedHeap
190 };
191
  // Maximum size allowed for a filler array; simply reads the cached
  // static _filler_array_max_size (initialized elsewhere — not visible here).
  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }
195
196 virtual Name kind() const = 0;
197
198 /**
199 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
200 * and JNI_OK on success.
201 */
202 virtual jint initialize() = 0;
203
204 // In many heaps, there will be a need to perform some initialization activities
205 // after the Universe is fully formed, but before general heap allocation is allowed.
206 // This is the correct place to place such initialization methods.
207 virtual void post_initialize();
208
  // Stop any ongoing concurrent work and prepare for exit.
  // Default implementation is a no-op; concurrent collectors override.
  virtual void stop() {}
211
212 void initialize_reserved_region(HeapWord *start, HeapWord *end);
213 MemRegion reserved_region() const { return _reserved; }
214 address base() const { return (address)reserved_region().start(); }
215
216 virtual size_t capacity() const = 0;
217 virtual size_t used() const = 0;
502 // non-object.
503 virtual HeapWord* block_start(const void* addr) const = 0;
504
505 // Requires "addr" to be the start of a chunk, and returns its size.
506 // "addr + size" is required to be the start of a new chunk, or the end
507 // of the active area of the heap.
508 virtual size_t block_size(const HeapWord* addr) const = 0;
509
510 // Requires "addr" to be the start of a block, and returns "TRUE" iff
511 // the block is an object.
512 virtual bool block_is_obj(const HeapWord* addr) const = 0;
513
514 // Returns the longest time (in ms) that has elapsed since the last
515 // time that any part of the heap was examined by a garbage collection.
516 virtual jlong millis_since_last_gc() = 0;
517
518 // Perform any cleanup actions necessary before allowing a verification.
519 virtual void prepare_for_verify() = 0;
520
521 // Generate any dumps preceding or following a full gc
522 void pre_full_gc_dump(GCTimer* timer);
523 void post_full_gc_dump(GCTimer* timer);
524
525 VirtualSpaceSummary create_heap_space_summary();
526 GCHeapSummary create_heap_summary();
527
528 MetaspaceSummary create_metaspace_summary();
529
530 // Print heap information on the given outputStream.
531 virtual void print_on(outputStream* st) const = 0;
  // Print basic heap information.  The default behavior is to call
  // print_on() on the tty stream; subclasses may override.
  virtual void print() const {
    print_on(tty);
  }
536 // Print more detailed heap information on the given
537 // outputStream. The default behavior is to call print_on(). It is
538 // up to each subclass to override it and add any additional output
539 // it needs.
540 virtual void print_extended_on(outputStream* st) const {
541 print_on(st);
552 }
553 // Iterator for all GC threads (other than VM thread)
554 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
555
556 // Print any relevant tracing info that flags imply.
557 // Default implementation does nothing.
558 virtual void print_tracing_info() const = 0;
559
560 void print_heap_before_gc();
561 void print_heap_after_gc();
562
563 // Registering and unregistering an nmethod (compiled code) with the heap.
564 // Override with specific mechanism for each specialized heap type.
565 virtual void register_nmethod(nmethod* nm);
566 virtual void unregister_nmethod(nmethod* nm);
567
568 void trace_heap_before_gc(const GCTracer* gc_tracer);
569 void trace_heap_after_gc(const GCTracer* gc_tracer);
570
571 // Heap verification
572 virtual void verify(bool silent, VerifyOption option) = 0;
573
574 // Non product verification and debugging.
575 #ifndef PRODUCT
576 // Support for PromotionFailureALot. Return true if it's time to cause a
577 // promotion failure. The no-argument version uses
578 // this->_promotion_failure_alot_count as the counter.
579 inline bool promotion_should_fail(volatile size_t* count);
580 inline bool promotion_should_fail();
581
582 // Reset the PromotionFailureALot counters. Should be called at the end of a
583 // GC in which promotion failure occurred.
584 inline void reset_promotion_should_fail(volatile size_t* count);
585 inline void reset_promotion_should_fail();
586 #endif // #ifndef PRODUCT
587
588 #ifdef ASSERT
  // Debug-only (ASSERT builds): true when fake OOM injection is enabled
  // (CIFireOOMAt > 1) and the injection counter has reached the threshold.
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
592 #endif
|
41 class AdaptiveSizePolicy;
42 class BarrierSet;
43 class CollectorPolicy;
44 class GCHeapSummary;
45 class GCTimer;
46 class GCTracer;
47 class MetaspaceSummary;
48 class Thread;
49 class ThreadClosure;
50 class VirtualSpaceSummary;
51 class nmethod;
52
53 class GCMessage : public FormatBuffer<1024> {
54 public:
55 bool is_before;
56
57 public:
58 GCMessage() {}
59 };
60
61 class CollectedHeap;
62
63 class GCHeapLog : public EventLogBase<GCMessage> {
64 private:
65 void log_heap(CollectedHeap* heap, bool before);
66
67 public:
68 GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
69
70 void log_heap_before(CollectedHeap* heap) {
71 log_heap(heap, true);
72 }
73 void log_heap_after(CollectedHeap* heap) {
74 log_heap(heap, false);
75 }
76 };
77
78 //
79 // CollectedHeap
80 // GenCollectedHeap
81 // G1CollectedHeap
82 // ParallelScavengeHeap
83 //
84 class CollectedHeap : public CHeapObj<mtInternal> {
85 friend class VMStructs;
86 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
87
88 private:
89 #ifdef ASSERT
90 static int _fire_out_of_memory_count;
91 #endif
92
93 GCHeapLog* _gc_heap_log;
94
180 // Verification functions
181 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
182 PRODUCT_RETURN;
183 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
184 PRODUCT_RETURN;
185 debug_only(static void check_for_valid_allocation_state();)
186
187 public:
188 enum Name {
189 GenCollectedHeap,
190 ParallelScavengeHeap,
191 G1CollectedHeap
192 };
193
  // Maximum size allowed for a filler array; simply reads the cached
  // static _filler_array_max_size (initialized elsewhere — not visible here).
  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }
197
198 virtual Name kind() const = 0;
199
200 virtual const char* name() const = 0;
201
202 /**
203 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
204 * and JNI_OK on success.
205 */
206 virtual jint initialize() = 0;
207
208 // In many heaps, there will be a need to perform some initialization activities
209 // after the Universe is fully formed, but before general heap allocation is allowed.
210 // This is the correct place to place such initialization methods.
211 virtual void post_initialize();
212
  // Stop any ongoing concurrent work and prepare for exit.
  // Default implementation is a no-op; concurrent collectors override.
  virtual void stop() {}
215
216 void initialize_reserved_region(HeapWord *start, HeapWord *end);
217 MemRegion reserved_region() const { return _reserved; }
218 address base() const { return (address)reserved_region().start(); }
219
220 virtual size_t capacity() const = 0;
221 virtual size_t used() const = 0;
506 // non-object.
507 virtual HeapWord* block_start(const void* addr) const = 0;
508
509 // Requires "addr" to be the start of a chunk, and returns its size.
510 // "addr + size" is required to be the start of a new chunk, or the end
511 // of the active area of the heap.
512 virtual size_t block_size(const HeapWord* addr) const = 0;
513
514 // Requires "addr" to be the start of a block, and returns "TRUE" iff
515 // the block is an object.
516 virtual bool block_is_obj(const HeapWord* addr) const = 0;
517
518 // Returns the longest time (in ms) that has elapsed since the last
519 // time that any part of the heap was examined by a garbage collection.
520 virtual jlong millis_since_last_gc() = 0;
521
522 // Perform any cleanup actions necessary before allowing a verification.
523 virtual void prepare_for_verify() = 0;
524
525 // Generate any dumps preceding or following a full gc
526 private:
527 void full_gc_dump(GCTimer* timer, const char* when);
528 public:
529 void pre_full_gc_dump(GCTimer* timer);
530 void post_full_gc_dump(GCTimer* timer);
531
532 VirtualSpaceSummary create_heap_space_summary();
533 GCHeapSummary create_heap_summary();
534
535 MetaspaceSummary create_metaspace_summary();
536
537 // Print heap information on the given outputStream.
538 virtual void print_on(outputStream* st) const = 0;
  // Print basic heap information.  The default behavior is to call
  // print_on() on the tty stream; subclasses may override.
  virtual void print() const {
    print_on(tty);
  }
543 // Print more detailed heap information on the given
544 // outputStream. The default behavior is to call print_on(). It is
545 // up to each subclass to override it and add any additional output
546 // it needs.
547 virtual void print_extended_on(outputStream* st) const {
548 print_on(st);
559 }
560 // Iterator for all GC threads (other than VM thread)
561 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
562
563 // Print any relevant tracing info that flags imply.
564 // Default implementation does nothing.
565 virtual void print_tracing_info() const = 0;
566
567 void print_heap_before_gc();
568 void print_heap_after_gc();
569
570 // Registering and unregistering an nmethod (compiled code) with the heap.
571 // Override with specific mechanism for each specialized heap type.
572 virtual void register_nmethod(nmethod* nm);
573 virtual void unregister_nmethod(nmethod* nm);
574
575 void trace_heap_before_gc(const GCTracer* gc_tracer);
576 void trace_heap_after_gc(const GCTracer* gc_tracer);
577
578 // Heap verification
579 virtual void verify(VerifyOption option) = 0;
580
581 // Non product verification and debugging.
582 #ifndef PRODUCT
583 // Support for PromotionFailureALot. Return true if it's time to cause a
584 // promotion failure. The no-argument version uses
585 // this->_promotion_failure_alot_count as the counter.
586 inline bool promotion_should_fail(volatile size_t* count);
587 inline bool promotion_should_fail();
588
589 // Reset the PromotionFailureALot counters. Should be called at the end of a
590 // GC in which promotion failure occurred.
591 inline void reset_promotion_should_fail(volatile size_t* count);
592 inline void reset_promotion_should_fail();
593 #endif // #ifndef PRODUCT
594
595 #ifdef ASSERT
  // Debug-only (ASSERT builds): true when fake OOM injection is enabled
  // (CIFireOOMAt > 1) and the injection counter has reached the threshold.
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
599 #endif
|