1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
26 #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
27
28 #include "gc_interface/gcCause.hpp"
29 #include "memory/allocation.hpp"
30 #include "memory/barrierSet.hpp"
31 #include "runtime/handles.hpp"
32 #include "runtime/perfData.hpp"
33 #include "runtime/safepoint.hpp"
34 #include "utilities/events.hpp"
35
36 // A "CollectedHeap" is an implementation of a java heap for HotSpot. This
37 // is an abstract class: there may be many different kinds of heaps. This
38 // class defines the functions that a heap must implement, and contains
39 // infrastructure common to all heaps.
40
41 class BarrierSet;
42 class ThreadClosure;
43 class AdaptiveSizePolicy;
44 class Thread;
45 class CollectorPolicy;
46
// A formatted message recorded in the GC heap history event log
// (see GCHeapLog below); FormatBuffer<1024> supplies the text storage.
47 class GCMessage : public FormatBuffer<1024> {
48 public:
// Presumably true when the message describes the heap before the
// collection and false after (set by GCHeapLog::log_heap) — confirm.
49 bool is_before;
50
// NOTE(review): second `public:` is redundant (the section above is
// already public); harmless but could be dropped.
51 public:
52 GCMessage() {}
53 };
54
55 class GCHeapLog : public EventLogBase<GCMessage> {
56 private:
57 void log_heap(bool before);
58
59 public:
60 GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
61
62 void log_heap_before() {
63 log_heap(true);
64 }
65 void log_heap_after() {
111 // Constructor
112 CollectedHeap();
113
114 // Do common initializations that must follow instance construction,
115 // for example, those needing virtual calls.
116 // This code could perhaps be moved into initialize() but would
117 // be slightly more awkward because we want the latter to be a
118 // pure virtual.
119 void pre_initialize();
120
121 // Create a new tlab. All TLAB allocations must go through this.
122 virtual HeapWord* allocate_new_tlab(size_t size);
123
124 // Accumulate statistics on all tlabs.
125 virtual void accumulate_statistics_all_tlabs();
126
127 // Reinitialize tlabs before resuming mutators.
128 virtual void resize_all_tlabs();
129
130 // Allocate from the current thread's TLAB, with broken-out slow path.
131 inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
132 static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
133
134 // Allocate an uninitialized block of the given size, or returns NULL if
135 // this is impossible.
136 inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);
137
138 // Like allocate_init, but the block returned by a successful allocation
139 // is guaranteed initialized to zeros.
140 inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);
141
142 // Helper functions for (VM) allocation.
143 inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
144 inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
145 HeapWord* objPtr);
146
147 inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);
148
149 inline static void post_allocation_setup_array(KlassHandle klass,
150 HeapWord* obj, int length);
151
152 // Clears an allocated object.
153 inline static void init_obj(HeapWord* obj, size_t size);
154
155 // Filler object utilities.
156 static inline size_t filler_array_hdr_size();
157 static inline size_t filler_array_min_size();
158
159 DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
160 DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
161
162 // Fill with a single array; caller must ensure filler_array_min_size() <=
163 // words <= filler_array_max_size().
164 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
165
166 // Fill with a single object (either an int array or a java.lang.Object).
167 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
168
169 // Verification functions
170 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
171 PRODUCT_RETURN;
172 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
173 PRODUCT_RETURN;
174 debug_only(static void check_for_valid_allocation_state();)
175
176 public:
// Identifies the concrete heap implementation; used to distinguish
// heap kinds at runtime via kind() below.
177 enum Name {
178 Abstract,
179 SharedHeap,
180 GenCollectedHeap,
181 ParallelScavengeHeap,
182 G1CollectedHeap
183 };
184
// Returns the cached upper bound (in heap words) on filler-array
// size; _filler_array_max_size is presumably computed during heap
// setup — confirm where it is assigned.
185 static inline size_t filler_array_max_size() {
186 return _filler_array_max_size;
187 }
188
// Runtime kind of this heap; concrete subclasses override to report
// their own Name.  Abstract is the base-class sentinel.
189 virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
190
191 /**
192 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
193 * and JNI_OK on success.
194 */
195 virtual jint initialize() = 0;
196
197 // In many heaps, there will be a need to perform some initialization activities
198 // after the Universe is fully formed, but before general heap allocation is allowed.
199 // This is the correct place to place such initialization methods.
200 virtual void post_initialize() = 0;
201
202 MemRegion reserved_region() const { return _reserved; }
203 address base() const { return (address)reserved_region().start(); }
204
205 // Future cleanup here. The following functions should specify bytes or
206 // heapwords as part of their signature.
207 virtual size_t capacity() const = 0;
208 virtual size_t used() const = 0;
209
210 // Return "true" if the part of the heap that allocates Java
211 // objects has reached the maximal committed limit that it can
212 // reach, without a garbage collection.
213 virtual bool is_maximal_no_gc() const = 0;
214
215 // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
216 // memory that the vm could make available for storing 'normal' java objects.
217 // This is based on the reserved address space, but should not include space
218 // that the vm uses internally for bookkeeping or temporary storage
219 // (e.g., in the case of the young gen, one of the survivor
220 // spaces).
221 virtual size_t max_capacity() const = 0;
222
223 // Returns "TRUE" if "p" points into the reserved area of the heap.
224 bool is_in_reserved(const void* p) const {
225 return _reserved.contains(p);
226 }
533 // non-object.
534 virtual HeapWord* block_start(const void* addr) const = 0;
535
536 // Requires "addr" to be the start of a chunk, and returns its size.
537 // "addr + size" is required to be the start of a new chunk, or the end
538 // of the active area of the heap.
539 virtual size_t block_size(const HeapWord* addr) const = 0;
540
541 // Requires "addr" to be the start of a block, and returns "TRUE" iff
542 // the block is an object.
543 virtual bool block_is_obj(const HeapWord* addr) const = 0;
544
545 // Returns the longest time (in ms) that has elapsed since the last
546 // time that any part of the heap was examined by a garbage collection.
547 virtual jlong millis_since_last_gc() = 0;
548
549 // Perform any cleanup actions necessary before allowing a verification.
550 virtual void prepare_for_verify() = 0;
551
552 // Generate any dumps preceding or following a full gc
553 void pre_full_gc_dump();
554 void post_full_gc_dump();
555
556 // Print heap information on the given outputStream.
557 virtual void print_on(outputStream* st) const = 0;
558 // The default behavior is to call print_on() on tty.
559 virtual void print() const {
560 print_on(tty);
561 }
562 // Print more detailed heap information on the given
563 // outputStream. The default behavior is to call print_on(). It is
564 // up to each subclass to override it and add any additional output
565 // it needs.
566 virtual void print_extended_on(outputStream* st) const {
567 print_on(st);
568 }
569
// Presumably invoked on error-reporting paths (name suggests hs_err
// dumping) — confirm with callers.
570 virtual void print_on_error(outputStream* st) const {
571 st->print_cr("Heap:");
572 print_extended_on(st);
573 st->cr();
574
// NOTE(review): dereferences _barrier_set without a NULL check; if
// this can run before heap initialization completes, it needs a
// guard — confirm.
575 _barrier_set->print_on(st);
576 }
577
578 // Print all GC threads (other than the VM thread)
579 // used by this heap.
580 virtual void print_gc_threads_on(outputStream* st) const = 0;
581 // The default behavior is to call print_gc_threads_on() on tty.
582 void print_gc_threads() {
583 print_gc_threads_on(tty);
584 }
585 // Iterator for all GC threads (other than VM thread)
586 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
587
588 // Print any relevant tracing info that flags imply.
589 // Default implementation does nothing.
590 virtual void print_tracing_info() const = 0;
591
592 // If PrintHeapAtGC is set call the appropriate routines.
// Print/log heap state before a collection: honors -XX:+PrintHeapAtGC
// and, when the in-memory GC heap history log exists, records an
// entry in it as well.
593 void print_heap_before_gc() {
594 if (PrintHeapAtGC) {
595 Universe::print_heap_before_gc();
596 }
597 if (_gc_heap_log != NULL) {
598 _gc_heap_log->log_heap_before();
599 }
600 }
// Print/log heap state after a collection; mirrors
// print_heap_before_gc() above.
601 void print_heap_after_gc() {
602 if (PrintHeapAtGC) {
603 Universe::print_heap_after_gc();
604 }
605 if (_gc_heap_log != NULL) {
606 _gc_heap_log->log_heap_after();
607 }
608 }
609
610 // Heap verification
611 virtual void verify(bool silent, VerifyOption option) = 0;
612
613 // Non product verification and debugging.
614 #ifndef PRODUCT
615 // Support for PromotionFailureALot. Return true if it's time to cause a
616 // promotion failure. The no-argument version uses
617 // this->_promotion_failure_alot_count as the counter.
618 inline bool promotion_should_fail(volatile size_t* count);
619 inline bool promotion_should_fail();
620
621 // Reset the PromotionFailureALot counters. Should be called at the end of a
622 // GC in which promotion failure occurred.
623 inline void reset_promotion_should_fail(volatile size_t* count);
624 inline void reset_promotion_should_fail();
625 #endif // #ifndef PRODUCT
626
627 #ifdef ASSERT
// Presumably supports -XX:CIFireOOMAt testing: reports whether the
// artificial compiler-interface OOM has been triggered — confirm
// against _fire_out_of_memory_count's updater.
// NOTE(review): logically boolean but returns int; changing the
// return type would touch callers outside this view.
628 static int fired_fake_oom() {
629 return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
630 }
631 #endif
632
633 public:
634 // This is a convenience method that is used in cases where
635 // the actual number of GC worker threads is not pertinent but
636 // only whether there more than 0. Use of this method helps
637 // reduce the occurrence of ParallelGCThreads to uses where the
638 // actual number may be germane.
639 static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
640
641 /////////////// Unit tests ///////////////
642
|
1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
26 #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
27
28 #include "gc_interface/gcCause.hpp"
29 #include "gc_implementation/shared/gcWhen.hpp"
30 #include "memory/allocation.hpp"
31 #include "memory/barrierSet.hpp"
32 #include "runtime/handles.hpp"
33 #include "runtime/perfData.hpp"
34 #include "runtime/safepoint.hpp"
35 #include "utilities/events.hpp"
36
37 // A "CollectedHeap" is an implementation of a java heap for HotSpot. This
38 // is an abstract class: there may be many different kinds of heaps. This
39 // class defines the functions that a heap must implement, and contains
40 // infrastructure common to all heaps.
41
42 class AdaptiveSizePolicy;
43 class BarrierSet;
44 class CollectorPolicy;
45 class GCHeapSummary;
46 class GCTimer;
47 class GCTracer;
48 class MetaspaceSummary;
49 class Thread;
50 class ThreadClosure;
51 class VirtualSpaceSummary;
52
// A formatted message recorded in the GC heap history event log
// (see GCHeapLog below); FormatBuffer<1024> supplies the text storage.
53 class GCMessage : public FormatBuffer<1024> {
54 public:
// Presumably true when the message describes the heap before the
// collection and false after (set by GCHeapLog::log_heap) — confirm.
55 bool is_before;
56
// NOTE(review): second `public:` is redundant (the section above is
// already public); harmless but could be dropped.
57 public:
58 GCMessage() {}
59 };
60
61 class GCHeapLog : public EventLogBase<GCMessage> {
62 private:
63 void log_heap(bool before);
64
65 public:
66 GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
67
68 void log_heap_before() {
69 log_heap(true);
70 }
71 void log_heap_after() {
117 // Constructor
118 CollectedHeap();
119
120 // Do common initializations that must follow instance construction,
121 // for example, those needing virtual calls.
122 // This code could perhaps be moved into initialize() but would
123 // be slightly more awkward because we want the latter to be a
124 // pure virtual.
125 void pre_initialize();
126
127 // Create a new tlab. All TLAB allocations must go through this.
128 virtual HeapWord* allocate_new_tlab(size_t size);
129
130 // Accumulate statistics on all tlabs.
131 virtual void accumulate_statistics_all_tlabs();
132
133 // Reinitialize tlabs before resuming mutators.
134 virtual void resize_all_tlabs();
135
136 // Allocate from the current thread's TLAB, with broken-out slow path.
137 inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size);
138 static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size);
139
140 // Allocate an uninitialized block of the given size, or returns NULL if
141 // this is impossible.
142 inline static HeapWord* common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS);
143
144 // Like allocate_init, but the block returned by a successful allocation
145 // is guaranteed initialized to zeros.
146 inline static HeapWord* common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS);
147
148 // Helper functions for (VM) allocation.
149 inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
150 inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
151 HeapWord* objPtr);
152
153 inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);
154
155 inline static void post_allocation_setup_array(KlassHandle klass,
156 HeapWord* obj, int length);
157
158 // Clears an allocated object.
159 inline static void init_obj(HeapWord* obj, size_t size);
160
161 // Filler object utilities.
162 static inline size_t filler_array_hdr_size();
163 static inline size_t filler_array_min_size();
164
165 DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
166 DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
167
168 // Fill with a single array; caller must ensure filler_array_min_size() <=
169 // words <= filler_array_max_size().
170 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
171
172 // Fill with a single object (either an int array or a java.lang.Object).
173 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
174
175 virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
176
177 // Verification functions
178 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
179 PRODUCT_RETURN;
180 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
181 PRODUCT_RETURN;
182 debug_only(static void check_for_valid_allocation_state();)
183
184 public:
// Identifies the concrete heap implementation; used to distinguish
// heap kinds at runtime via kind() below.
185 enum Name {
186 Abstract,
187 SharedHeap,
188 GenCollectedHeap,
189 ParallelScavengeHeap,
190 G1CollectedHeap
191 };
192
// Returns the cached upper bound (in heap words) on filler-array
// size; _filler_array_max_size is presumably computed during heap
// setup — confirm where it is assigned.
193 static inline size_t filler_array_max_size() {
194 return _filler_array_max_size;
195 }
196
// Runtime kind of this heap; concrete subclasses override to report
// their own Name.  Abstract is the base-class sentinel.
197 virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
198
199 /**
200 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
201 * and JNI_OK on success.
202 */
203 virtual jint initialize() = 0;
204
205 // In many heaps, there will be a need to perform some initialization activities
206 // after the Universe is fully formed, but before general heap allocation is allowed.
207 // This is the correct place to place such initialization methods.
208 virtual void post_initialize() = 0;
209
210 MemRegion reserved_region() const { return _reserved; }
211 address base() const { return (address)reserved_region().start(); }
212
213 virtual size_t capacity() const = 0;
214 virtual size_t used() const = 0;
215
216 // Return "true" if the part of the heap that allocates Java
217 // objects has reached the maximal committed limit that it can
218 // reach, without a garbage collection.
219 virtual bool is_maximal_no_gc() const = 0;
220
221 // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
222 // memory that the vm could make available for storing 'normal' java objects.
223 // This is based on the reserved address space, but should not include space
224 // that the vm uses internally for bookkeeping or temporary storage
225 // (e.g., in the case of the young gen, one of the survivor
226 // spaces).
227 virtual size_t max_capacity() const = 0;
228
229 // Returns "TRUE" if "p" points into the reserved area of the heap.
230 bool is_in_reserved(const void* p) const {
231 return _reserved.contains(p);
232 }
539 // non-object.
540 virtual HeapWord* block_start(const void* addr) const = 0;
541
542 // Requires "addr" to be the start of a chunk, and returns its size.
543 // "addr + size" is required to be the start of a new chunk, or the end
544 // of the active area of the heap.
545 virtual size_t block_size(const HeapWord* addr) const = 0;
546
547 // Requires "addr" to be the start of a block, and returns "TRUE" iff
548 // the block is an object.
549 virtual bool block_is_obj(const HeapWord* addr) const = 0;
550
551 // Returns the longest time (in ms) that has elapsed since the last
552 // time that any part of the heap was examined by a garbage collection.
553 virtual jlong millis_since_last_gc() = 0;
554
555 // Perform any cleanup actions necessary before allowing a verification.
556 virtual void prepare_for_verify() = 0;
557
558 // Generate any dumps preceding or following a full gc
559 void pre_full_gc_dump(GCTimer* timer);
560 void post_full_gc_dump(GCTimer* timer);
561
562 VirtualSpaceSummary create_heap_space_summary();
563 GCHeapSummary create_heap_summary();
564
565 MetaspaceSummary create_metaspace_summary();
566
567 // Print heap information on the given outputStream.
568 virtual void print_on(outputStream* st) const = 0;
569 // The default behavior is to call print_on() on tty.
570 virtual void print() const {
571 print_on(tty);
572 }
573 // Print more detailed heap information on the given
574 // outputStream. The default behavior is to call print_on(). It is
575 // up to each subclass to override it and add any additional output
576 // it needs.
577 virtual void print_extended_on(outputStream* st) const {
578 print_on(st);
579 }
580
// Presumably invoked on error-reporting paths (name suggests hs_err
// dumping) — confirm with callers.
581 virtual void print_on_error(outputStream* st) const {
582 st->print_cr("Heap:");
583 print_extended_on(st);
584 st->cr();
585
// NOTE(review): dereferences _barrier_set without a NULL check; if
// this can run before heap initialization completes, it needs a
// guard — confirm.
586 _barrier_set->print_on(st);
587 }
588
589 // Print all GC threads (other than the VM thread)
590 // used by this heap.
591 virtual void print_gc_threads_on(outputStream* st) const = 0;
592 // The default behavior is to call print_gc_threads_on() on tty.
593 void print_gc_threads() {
594 print_gc_threads_on(tty);
595 }
596 // Iterator for all GC threads (other than VM thread)
597 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
598
599 // Print any relevant tracing info that flags imply.
600 // Default implementation does nothing.
601 virtual void print_tracing_info() const = 0;
602
603 void print_heap_before_gc();
604 void print_heap_after_gc();
605
606 void trace_heap_before_gc(GCTracer* gc_tracer);
607 void trace_heap_after_gc(GCTracer* gc_tracer);
608
609 // Heap verification
610 virtual void verify(bool silent, VerifyOption option) = 0;
611
612 // Non product verification and debugging.
613 #ifndef PRODUCT
614 // Support for PromotionFailureALot. Return true if it's time to cause a
615 // promotion failure. The no-argument version uses
616 // this->_promotion_failure_alot_count as the counter.
617 inline bool promotion_should_fail(volatile size_t* count);
618 inline bool promotion_should_fail();
619
620 // Reset the PromotionFailureALot counters. Should be called at the end of a
621 // GC in which promotion failure occurred.
622 inline void reset_promotion_should_fail(volatile size_t* count);
623 inline void reset_promotion_should_fail();
624 #endif // #ifndef PRODUCT
625
626 #ifdef ASSERT
// Presumably supports -XX:CIFireOOMAt testing: reports whether the
// artificial compiler-interface OOM has been triggered — confirm
// against _fire_out_of_memory_count's updater.
// NOTE(review): logically boolean but returns int; changing the
// return type would touch callers outside this view.
627 static int fired_fake_oom() {
628 return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
629 }
630 #endif
631
632 public:
633 // This is a convenience method that is used in cases where
634 // the actual number of GC worker threads is not pertinent but
635 // only whether there more than 0. Use of this method helps
636 // reduce the occurrence of ParallelGCThreads to uses where the
637 // actual number may be germane.
638 static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
639
640 /////////////// Unit tests ///////////////
641
|