rev 57156 : imported patch 8234796-v3
1 /*
2 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24 #include "precompiled.hpp"
25 #include "gc/shared/gcHeapSummary.hpp"
26 #include "gc/shared/suspendibleThreadSet.hpp"
27 #include "gc/z/zCollectedHeap.hpp"
28 #include "gc/z/zGlobals.hpp"
29 #include "gc/z/zHeap.inline.hpp"
30 #include "gc/z/zNMethod.hpp"
31 #include "gc/z/zObjArrayAllocator.hpp"
32 #include "gc/z/zOop.inline.hpp"
33 #include "gc/z/zServiceability.hpp"
34 #include "gc/z/zStat.hpp"
35 #include "gc/z/zUtils.inline.hpp"
36 #include "memory/iterator.hpp"
37 #include "memory/universe.hpp"
38 #include "runtime/mutexLocker.hpp"
39 #include "utilities/align.hpp"
40
41 ZCollectedHeap* ZCollectedHeap::heap() {
42 CollectedHeap* heap = Universe::heap();
43 assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
44 assert(heap->kind() == CollectedHeap::Z, "Invalid name");
45 return (ZCollectedHeap*)heap;
46 }
47
// Constructs the ZGC heap. Note that _initialize receives the address of
// _barrier_set, so the initializer-list order (which must match the member
// declaration order in the header) is significant here. The director,
// driver, uncommitter and stat threads are heap-allocated and later
// terminated via stop().
ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}
58
// Identifies this heap as the Z kind (checked by ZCollectedHeap::heap()).
CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

// Human-readable GC name, used in logging and error reporting.
const char* ZCollectedHeap::name() const {
  return ZName;
}
66
67 jint ZCollectedHeap::initialize() {
68 if (!_heap.is_initialized()) {
69 return JNI_ENOMEM;
70 }
71
72 Universe::calculate_verify_data((HeapWord*)0, (HeapWord*)UINTPTR_MAX);
73
74 return JNI_OK;
75 }
76
// Sets up the serviceability support (memory manager/pool reporting),
// delegated to the ZHeap.
void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}
80
// Terminates the GC threads created in the constructor, as part of
// VM shutdown.
void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}
87
// Returns the policy object controlling soft reference clearing.
SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
91
// Heap size queries, all delegated to the ZHeap (values in bytes).

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}
107
// Not supported by ZGC; no caller should ever reach this.
bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}
113
// Returns true if the given address lies within the ZGC heap.
bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

// Computes the identity hash for an oop, delegated to the ZHeap using
// the oop's raw address.
uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(ZOop::to_address(obj));
}
121
122 HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
123 const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
124 const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);
125
126 if (addr != 0) {
127 *actual_size = requested_size;
128 }
129
130 return (HeapWord*)addr;
131 }
132
133 oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
134 if (!do_zero) {
135 return CollectedHeap::array_allocate(klass, size, length, false /* do_zero */, THREAD);
136 }
137
138 ZObjArrayAllocator allocator(klass, size, length, THREAD);
139 return allocator.allocate();
140 }
141
// Allocates an object of the given size in words outside a TLAB.
// gc_overhead_limit_was_exceeded is not used by ZGC.
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}
146
// Last-ditch metadata allocation after a normal metaspace allocation has
// failed. Tries, in order: an asynchronous GC plus metaspace expansion, a
// synchronous soft-ref-clearing GC plus plain retry, and finally expansion
// again. Returns NULL only when all attempts fail (caller reports OOME).
// The exact sequence of GC/expand/retry steps is deliberate; do not reorder.
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}
179
// Requests a GC cycle for the given cause; the ZDriver decides whether the
// request blocks until cycle completion.
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // However, neither the heap dumper nor the heap inspector really need a GC
  // to happen, but the result of their heap iterations might in that case be
  // less accurate since they might include objects that would otherwise have
  // been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}
196
// ZGC has no stop-the-world full collection; this entry point must never
// be reached.
void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}
201
// TLAB support and statistics. The Thread* parameters are ignored since
// ZGC tracks these values globally in the ZHeap, not per thread.

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}
221
// ZGC barriers can never be elided for TLAB stores.
bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

// Only meaningful for card-marking collectors; must never be called on ZGC.
bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

// ZGC has no card table; must never be called.
bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}
237
// Serviceability (JMX) support: ZGC exposes exactly one memory manager
// and one memory pool, both owned by the ZHeap.

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}
245
// Applies the closure to all objects in the heap, including those only
// reachable through weak references (visit_weaks == true).
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}
249
// nmethod lifecycle hooks, delegated to ZNMethod so the GC can track
// oops embedded in compiled code.

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

// ZGC performs no per-nmethod verification.
void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}
265
// Work gang made available to the VM for parallel safepoint work.
WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

// Time since the last GC cycle finished, converted to milliseconds by
// dividing by MILLIUNITS (assumes ZStatCycle::time_since_last() reports
// in the corresponding finer unit — see zStat for the exact unit).
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}
273
// Applies the closure to every GC-owned thread: the four singleton
// threads plus the ZHeap worker and runtime worker gangs.
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

// Summary for GC tracing; ZGC reports a zero-based range since the heap
// has no single contiguous reserved region in the conventional sense.
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  return VirtualSpaceSummary((HeapWord*)0, (HeapWord*)capacity(), (HeapWord*)max_capacity());
}
286
// Called when a safepoint begins/ends, to park and resume concurrent GC
// threads that participate in the suspendible thread set.

void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

// No preparation needed before heap verification.
void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}
298
// Prints a heap summary (used by e.g. jcmd and log output).
void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

// Prints extended diagnostics for hs_err crash reports: global phase and
// sequence number, sizing globals, and the colored-pointer metadata masks.
void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr( "Heap");
  st->print_cr( " GlobalPhase: %u", ZGlobalPhase);
  st->print_cr( " GlobalSeqNum: %u", ZGlobalSeqNum);
  st->print_cr( " Offset Max: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr( " Page Size Small: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr( " Page Size Medium: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr( "Metadata Bits");
  st->print_cr( " Good: " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr( " Bad: " PTR_FORMAT, ZAddressBadMask);
  st->print_cr( " WeakBad: " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr( " Marked: " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr( " Remapped: " PTR_FORMAT, ZAddressMetadataRemapped);
}
319
// Prints a detailed heap dump, delegated to the ZHeap.
void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

// Prints all GC threads, one per line, mirroring the set visited by
// gc_threads_do().
void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}
336
// ZGC emits its tracing through the unified logging framework instead.
void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

// Describes the given address for error reporting; returns true if the
// address was recognized as belonging to the ZGC heap.
bool ZCollectedHeap::print_location(outputStream* st, void* addr) const {
  return _heap.print_location(st, (uintptr_t)addr);
}

// Verifies heap consistency. The VerifyOption is ignored; ZHeap performs
// its own fixed set of checks.
void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

// Returns true if the object looks like a valid oop to the ZHeap.
bool ZCollectedHeap::is_oop(oop object) const {
  return _heap.is_oop(ZOop::to_address(object));
}
--- EOF ---