// ============================================================
// Before (the 2017 version of the file)
// ============================================================
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
// ============================================================
// After (the 2018 version of the file)
// ============================================================
/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
// ============================================================
// Before (2017): CollectedHeap class declaration (excerpt)
// ============================================================
//
//   CollectedHeap
//     GenCollectedHeap
//       SerialHeap
//       CMSHeap
//     G1CollectedHeap
//     ParallelScavengeHeap
//
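
// A minimal standalone sketch of the hierarchy above: one abstract base
// class behind which the VM hides whichever concrete heap it selected at
// startup. All names and behavior here are illustrative, not HotSpot's;
// the block is guarded with #if 0 so it stays out of the header proper.
#if 0
#include <cstdio>

struct ToyCollectedHeap {
  virtual ~ToyCollectedHeap() {}
  virtual const char* name() const = 0;
  virtual void collect() = 0; // each heap supplies its own policy
};

struct ToySerialHeap : ToyCollectedHeap {
  const char* name() const override { return "Serial"; }
  void collect() override { std::printf("single-threaded stop-the-world collection\n"); }
};

struct ToyG1Heap : ToyCollectedHeap {
  const char* name() const override { return "G1"; }
  void collect() override { std::printf("incremental region-based collection\n"); }
};

int main() {
  ToyCollectedHeap* heap = new ToyG1Heap(); // chosen once, at startup
  std::printf("using %s heap\n", heap->name());
  heap->collect(); // everything else dispatches through the base class
  delete heap;
  return 0;
}
#endif
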
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

 private:
#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
  // or INCLUDE_JVMCI is being used
  bool _defer_initial_card_mark;

  MemRegion _reserved;

 protected:
  BarrierSet* _barrier_set;
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for the current garbage collection. Between collections this
  // should hold a value indicating that no collection is in progress.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Do common initializations that must follow instance construction,
  // for example, those needing virtual calls.
  // This code could perhaps be moved into initialize() but would
  // be slightly more awkward because we want the latter to be a
  // pure virtual.
  void pre_initialize();

  // Create a new TLAB. All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all TLABs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with a broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size);
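
  // A minimal standalone sketch of the fast/slow TLAB split declared above.
  // Everything here (ToyTlab, toy_allocate, the buffer size) is illustrative
  // only, not HotSpot code: just the lock-free bump-pointer pattern the two
  // declarations imply, guarded with #if 0 so it stays out of the class.
#if 0
#include <cstddef>
#include <cstdlib>

struct ToyTlab {
  char* top; // next free byte in this thread's buffer
  char* end; // one past the last usable byte
};

// Slow path: refill the buffer from the shared heap (malloc stands in
// here), then allocate the object from the fresh buffer.
char* toy_allocate_slow(ToyTlab* tlab, size_t size) {
  const size_t tlab_bytes = 64 * 1024;
  if (size > tlab_bytes) return NULL; // would not fit in any TLAB
  char* fresh = static_cast<char*>(std::malloc(tlab_bytes));
  if (fresh == NULL) return NULL;
  tlab->top = fresh + size;
  tlab->end = fresh + tlab_bytes;
  return fresh;
}

// Fast path: one compare and one bump, no locking, since the buffer is
// private to the thread.
inline char* toy_allocate(ToyTlab* tlab, size_t size) {
  if (static_cast<size_t>(tlab->end - tlab->top) >= size) {
    char* obj = tlab->top;
    tlab->top += size;
    return obj;
  }
  return toy_allocate_slow(tlab, size);
}
#endif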

  // Allocate an uninitialized block of the given size; returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed to be initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);
// ============================================================
// After (2018): CollectedHeap class declaration (excerpt)
// ============================================================
//
//   CollectedHeap
//     GenCollectedHeap
//       SerialHeap
//       CMSHeap
//     G1CollectedHeap
//     ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

 private:
#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  MemRegion _reserved;

 protected:
  BarrierSet* _barrier_set;
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for the current garbage collection. Between collections this
  // should hold a value indicating that no collection is in progress.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all TLABs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with a broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size);

  // Allocate an uninitialized block of the given size; returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed to be initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);
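
  // A minimal standalone sketch of the noinit/init split above (toy names,
  // malloc in place of the real allocator, not HotSpot code): the "init"
  // flavor simply adds the zeroing guarantee that Java object fields need.
#if 0
#include <cstddef>
#include <cstdlib>
#include <cstring>

char* toy_mem_allocate_noinit(size_t byte_size) {
  return static_cast<char*>(std::malloc(byte_size)); // contents undefined
}

char* toy_mem_allocate_init(size_t byte_size) {
  char* block = toy_mem_allocate_noinit(byte_size);
  if (block != NULL) {
    std::memset(block, 0, byte_size); // fields must read as zero/null
  }
  return block;
}
#endif
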
// ============================================================
// Before (2017): TLAB, store-barrier, and collection interface (excerpt)
// ============================================================
  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
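
  // A minimal standalone sketch of how a refill policy might use this
  // estimate: clamp the desired buffer size by both the heap's TLAB
  // ceiling and the amount allocatable without forcing a collection.
  // The function and parameter names are illustrative, not HotSpot's.
#if 0
#include <cstddef>

size_t toy_pick_tlab_size(size_t desired, size_t max_tlab, size_t unsafe_max) {
  size_t size = desired;
  if (size > max_tlab) size = max_tlab;     // cf. max_tlab_size()
  if (size > unsafe_max) size = unsafe_max; // cf. unsafe_max_tlab_alloc()
  return size;
}
#endif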

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // new_store_pre_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere. The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);

  // Answers whether an initializing store to a new object currently
  // allocated at the given address needs a store barrier: returns
  // "true" if no barrier is needed and "false" if one is.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
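
  // A minimal standalone sketch of the slow-path protocol described above,
  // with hypothetical names throughout; the allocator and pre-barrier are
  // left as bare declarations since only the calling order matters here.
#if 0
#include <cstddef>

struct ToyHeap;
struct ToyThread;
struct ToyObj { long field_a; long field_b; };

ToyObj* toy_slow_path_allocate(ToyHeap* heap, ToyThread* thread, size_t size);
ToyObj* toy_new_store_pre_barrier(ToyHeap* heap, ToyThread* thread, ToyObj* obj);

// When the compiler has elided initializing-store barriers, its runtime
// support must inform the heap before any initializing store happens.
ToyObj* toy_allocate_and_init(ToyHeap* heap, ToyThread* thread) {
  ToyObj* obj = toy_slow_path_allocate(heap, thread, sizeof(ToyObj)); // may be outside a TLAB
  obj = toy_new_store_pre_barrier(heap, thread, obj); // the heap hears about it first
  obj->field_a = 0; // initializing stores follow, with no per-store barrier
  obj->field_b = 0;
  return obj;
}
#endif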

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_store_pre_barrier() above. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular heap type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);
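
  // A minimal standalone sketch of deferring and flushing a card mark, as
  // described above, using toy types only (a real card table, remembered
  // set, and safepoint machinery are far more involved).
#if 0
#include <cstddef>

struct ToyCardTable {
  unsigned char* cards; // one byte per 512-byte card of the heap
  char* heap_base;
  void mark(void* addr) {
    cards[(static_cast<char*>(addr) - heap_base) >> 9] = 1; // dirty the card
  }
};

struct ToyThreadState {
  void* deferred_card_mark; // NULL when nothing is pending
};

// Called at the next slow-path allocation or GC-related safepoint, so the
// card mark is strictly ordered after the object's initializing stores.
void toy_flush_deferred_store_barrier(ToyThreadState* t, ToyCardTable* ct) {
  if (t->deferred_card_mark != NULL) {
    ct->mark(t->deferred_card_mark);
    t->deferred_card_mark = NULL;
  }
}
#endif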

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // VM thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the VM thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }
  void set_barrier_set(BarrierSet* barrier_set);

// ============================================================
// After (2018): TLAB and collection interface (excerpt)
// ============================================================
  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;
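
  // A minimal standalone sketch of how an explicit-GC request might flow
  // through collect(); the types, flag, and policy here are illustrative
  // (cf. the real -XX:+DisableExplicitGC option), not HotSpot's code.
#if 0
enum ToyGCCause { TOY_CAUSE_SYSTEM_GC, TOY_CAUSE_ALLOCATION_FAILURE };

struct ToyHeap {
  bool disable_explicit_gc;
  void do_full_collection(bool clear_all_soft_refs); // defined elsewhere
  void collect(ToyGCCause cause) {
    if (cause == TOY_CAUSE_SYSTEM_GC && disable_explicit_gc) {
      return; // explicit requests may be ignored outright
    }
    // System.gc implies as full a collection as this heap supports.
    do_full_collection(/*clear_all_soft_refs=*/ false);
  }
};
#endif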

  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // VM thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the VM thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }
  void set_barrier_set(BarrierSet* barrier_set);
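
  // A minimal standalone sketch of why the heap exposes its barrier set:
  // reference stores funnel through it so callers stay GC-agnostic. The
  // types and the post-barrier hook below are illustrative, not HotSpot's.
#if 0
struct ToyBarrierSet {
  virtual ~ToyBarrierSet() {}
  virtual void write_ref_post(void** field) = 0; // e.g. dirty the card for 'field'
};

void toy_store_reference(ToyBarrierSet* bs, void** field, void* value) {
  *field = value;            // the actual reference store
  bs->write_ref_post(field); // then the GC-specific bookkeeping
}
#endif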