class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before() {
    log_heap(true);
  }
  void log_heap_after() {
    log_heap(false);
  }
};
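
// Example (illustrative sketch): how the owning heap records state around a
// collection; `_gc_heap_log` is the CollectedHeap field declared below.
//
//   if (_gc_heap_log != NULL) {
//     _gc_heap_log->log_heap_before();
//   }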

//
// CollectedHeap
//   GenCollectedHeap
//   G1CollectedHeap
//   ShenandoahHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

 private:
#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
  bool _defer_initial_card_mark;

  MemRegion _reserved;

 protected:
  BarrierSet* _barrier_set;

  // ...
  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap,
    ShenandoahHeap
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;
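
  // Example (illustrative sketch): callers that need collector-specific
  // behavior can dispatch on the concrete kind, e.g.
  //
  //   if (Universe::heap()->kind() == CollectedHeap::G1CollectedHeap) {
  //     // G1-specific path
  //   }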

  virtual HeapWord* tlab_post_allocation_setup(HeapWord* obj);

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;
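
  // Example (illustrative sketch): heap setup code is expected to propagate
  // the JNI status to its caller, e.g.
  //
  //   jint status = Universe::heap()->initialize();
  //   if (status != JNI_OK) {
  //     return status; // JNI_ENOMEM: could not reserve/commit the heap
  //   }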

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  void initialize_reserved_region(HeapWord* start, HeapWord* end);
  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }
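
  // Example (illustrative sketch): a cheap "is this address inside the heap
  // reservation?" check built from the accessors above; `addr` is a
  // hypothetical pointer.
  //
  //   bool in_reserved = heap->reserved_region().contains((HeapWord*)addr);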

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // ...

  virtual bool is_scavengable(const void* p) = 0;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);

  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                       oop obj);
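
  // Example (illustrative sketch): typical use from a klass allocation path,
  // where `klass` is a KlassHandle and `size` the instance size in words:
  //
  //   oop obj = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);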

  virtual uint oop_extra_words();

#ifndef CC_INTERP
  virtual void compile_prepare_oop(MacroAssembler* masm, Register obj);
#endif

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs, only individual
  // objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
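
  // Example (illustrative sketch): how a cover method might call down into
  // mem_allocate(); the failure handling shown is schematic.
  //
  //   bool limit_exceeded = false;
  //   HeapWord* mem = heap->mem_allocate(size, &limit_exceeded);
  //   if (mem == NULL) {
  //     // allocation failed even after GC; caller throws OutOfMemoryError
  //   }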

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
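
  // Example (illustrative sketch): filling the unused tail of a retired TLAB
  // so the heap stays parseable; `tlab_top` and `tlab_end` are hypothetical
  // locals.
  //
  //   size_t remaining = pointer_delta(tlab_end, tlab_top);
  //   if (remaining >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_objects(tlab_top, remaining);
  //   }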

  // ...

  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // Registering and unregistering an nmethod (compiled code) with the heap.
  // Override with specific mechanism for each specialized heap type.
  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(bool silent, VerifyOption option) = 0;

  // Shut down all GC workers and other GC-related threads.
  virtual void shutdown();

  // Accumulate additional statistics from GCLABs.
  virtual void accumulate_statistics_all_gclabs();

  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot. Return true if it's time to cause a
  // promotion failure. The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters. Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
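
  // Example (illustrative sketch): a young-gen promotion path can consult
  // promotion_should_fail() to inject failures under PromotionFailureALot:
  //
  //   if (promotion_should_fail()) {
  //     return NULL; // pretend the promotion attempt failed
  //   }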
#endif // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif

 public:
  // Copy the current allocation context statistics for the specified contexts.
  // For each context in contexts, set the corresponding entries in the totals
  // and accuracy arrays to the current values held by the statistics. Each
  // array should be of length len.
  // Returns true if there are more stats available.
  virtual bool copy_allocation_context_stats(const jint* contexts,
                                             jlong* totals,
                                             jbyte* accuracy,
                                             jint len) {
    return false;
  }
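
  // Example (illustrative sketch): polling the stats for a single context;
  // the context id 0 is a placeholder.
  //
  //   jint  ctx[1]    = { 0 };
  //   jlong totals[1] = { 0 };
  //   jbyte acc[1]    = { 0 };
  //   bool more = heap->copy_allocation_context_stats(ctx, totals, acc, 1);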

  virtual bool is_obj_ill(const oop obj) const {
    return true;
  }

  /////////////// Unit tests ///////////////

  NOT_PRODUCT(static void test_is_in();)
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  // Restore the previous cause on scope exit.
  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
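
// Example (illustrative sketch): scoping the cause of a collection so it is
// restored automatically when the VM operation unwinds; do_full_collection()
// stands in for whatever collection entry point the caller uses.
//
//   {
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false);
//   }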