#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)collectedHeap.hpp 1.58 07/09/07 10:56:50 JVM"
#endif
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
class BarrierSet;
class ThreadClosure;
class AdaptiveSizePolicy;
class Thread;

//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  size_t _max_heap_capacity;
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection. Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new tlab
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Fix up tlabs to make the heap well-formed again,
  // optionally retiring the tlabs.
  virtual void fill_all_tlabs(bool retire);

  // If there is no permanent area, revert to common_mem_allocate_init
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                             HeapWord* objPtr,
                                                             size_t size);

  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
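  //
  // Illustrative sketch (not part of the interface): kind() lets callers
  // discover the concrete heap without RTTI and downcast after checking it.
  // Universe::heap() is assumed here as the usual way to reach the heap:
  //
  //   CollectedHeap* heap = Universe::heap();
  //   if (heap->kind() == CollectedHeap::GenCollectedHeap) {
  //     GenCollectedHeap* gch = (GenCollectedHeap*) heap;
  //     // ... use GenCollectedHeap-specific operations ...
  //   }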

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }

  // Return the number of bytes currently reserved for holding objects.
  size_t reserved_obj_bytes() const { return _reserved.byte_size(); }

  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
                                          int size,
                                          TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                        oop obj,
                                                        int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool is_noref,
                                 bool is_tlab,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
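  //
  // Illustrative sketch (hypothetical glue code): the object and array
  // allocators are covers over mem_allocate() plus the
  // post_allocation_setup_* helpers declared above, roughly:
  //
  //   bool limit_exceeded;
  //   HeapWord* mem = heap->mem_allocate(size, false /* is_noref */,
  //                                      false /* is_tlab */, &limit_exceeded);
  //   if (mem != NULL) {
  //     post_allocation_setup_obj(klass, mem, size);  // install mark + klass
  //     return (oop) mem;
  //   }
  //   // else: collect and retry, or throw OutOfMemoryError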

  // The boundary between a "large" and "small" array of primitives, in words.
  virtual size_t large_typearray_limit() = 0;

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
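  //
  // Illustrative sketch (hypothetical fast path): when
  // supports_inline_contig_alloc() answers true, generated code can allocate
  // by bumping the word that top_addr() points at against *end_addr():
  //
  //   HeapWord** top = heap->top_addr();
  //   HeapWord** end = heap->end_addr();
  //   HeapWord* obj = *top;
  //   if (obj + size <= *end) {
  //     *top = obj + size;        // a real fast path must bump atomically
  //     // ... initialize the object at obj ...
  //   } else {
  //     // fall back to mem_allocate()
  //   }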
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
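  //
  // A heap that does support TLABs overrides the three functions above; a
  // minimal sketch (hypothetical subclass with a young_gen() accessor):
  //
  //   virtual bool   supports_tlab_allocation() const { return true; }
  //   virtual size_t tlab_capacity(Thread* thr) const {
  //     return young_gen()->eden_capacity();   // hypothetical
  //   }
  //   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const {
  //     return young_gen()->eden_free();       // hypothetical
  //   }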
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    guarantee(kind() < CollectedHeap::G1CollectedHeap, "else change or refactor this");
    return true;
  }
  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere. The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_barrier(oop new_obj);
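  //
  // Caller-side sketch of the contract above (hypothetical runtime helper):
  // a slow-path-allocated object is passed through new_store_barrier()
  // before any barrier-elided initializing stores are performed:
  //
  //   oop obj = slow_path_allocate(klass, size, THREAD);   // hypothetical
  //   obj = Universe::heap()->new_store_barrier(obj);
  //   // ... initializing stores without card marks may now follow ...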

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const;

  // Does this heap support heap inspection (+PrintClassHistogram?)
  virtual bool supports_heap_inspection() const {
    return false; // Until RFE 5023697 is implemented
  }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)collectedHeap.hpp 1.58 07/09/07 10:56:50 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
class BarrierSet;
class ThreadClosure;
class AdaptiveSizePolicy;
class Thread;

//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection. Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new tlab
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Fix up tlabs to make the heap well-formed again,
  // optionally retiring the tlabs.
  virtual void fill_all_tlabs(bool retire);

  // If there is no permanent area, revert to common_mem_allocate_init
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                             HeapWord* objPtr,
                                                             size_t size);

  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();
  static inline size_t filler_array_max_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words);
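  //
  // Illustrative sketch (not the actual implementation): a public
  // fill_with_objects() can be built from these helpers by carving a large
  // region into max-sized filler arrays and finishing with one small object,
  // ignoring the corner case where the leftover would drop below
  // min_fill_size():
  //
  //   while (words > filler_array_max_size()) {
  //     size_t chunk = filler_array_max_size();
  //     fill_with_array(start, chunk);
  //     start += chunk;
  //     words -= chunk;
  //   }
  //   fill_with_object_impl(start, words);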

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;
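  //
  // Illustrative startup order (hedged sketch; the exact call sites live in
  // the Universe bootstrap code): initialize() runs first and its JNI status
  // is propagated, post_initialize() only once the Universe exists:
  //
  //   jint status = heap->initialize();
  //   if (status != JNI_OK) return status;   // e.g. JNI_ENOMEM
  //   // ... Universe is created ...
  //   heap->post_initialize();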

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
                                          int size,
                                          TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                        oop obj,
                                                        int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool is_noref,
                                 bool is_tlab,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

  // The boundary between a "large" and "small" array of primitives, in words.
  virtual size_t large_typearray_limit() = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words);

  static void fill_with_object(HeapWord* start, size_t words);
  static void fill_with_object(MemRegion region) {
    fill_with_object(region.start(), region.word_size());
  }
  static void fill_with_object(HeapWord* start, HeapWord* end) {
    fill_with_object(start, pointer_delta(end, start));
  }
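  //
  // Example use (illustrative, hypothetical values): retiring the unused tail
  // of a region by overwriting it with dead filler objects so that heap
  // iteration still sees a parseable sequence of objects:
  //
  //   HeapWord* top = ...;   // first unused word
  //   HeapWord* end = ...;   // end of the region
  //   if (pointer_delta(end, top) >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_object(top, end);   // small regions only
  //   }
  //   // for arbitrarily large gaps, use fill_with_objects(top, words)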

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const = 0;
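  //
  // Minimal sketch of an override (hypothetical subclass): a generational
  // heap whose write barrier exists only to record cross-generation pointers
  // can typically answer true here, while a collector whose barrier must
  // observe every initializing store would answer false:
  //
  //   virtual bool can_elide_tlab_store_barriers() const { return true; }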

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere. The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const = 0;

  // Does this heap support heap inspection (+PrintClassHistogram?)
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

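  //
  // Typical uses (illustrative): a collector brackets its stop-world phase
  // with the friend class IsGCActiveMark so that _is_gc_active is set and
  // cleared in a block-structured way, and GC-only code can assert on it:
  //
  //   {
  //     IsGCActiveMark mark;   // sets _is_gc_active for this scope
  //     // ... perform the stop-world collection ...
  //   }                        // cleared again on scope exit
  //
  //   assert(Universe::heap()->is_gc_active(), "called outside a collection?");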