/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

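// Stop the GC-owned threads (director, driver and stat) as part of VM shutdown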
void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_scavengable(oop obj) {
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

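// TLAB allocation in ZGC is all-or-nothing: min_size is not consulted,
// and on success the actual size always equals the requested size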
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

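// Note that gc_overhead_limit_was_exceeded is deliberately left untouched,
// as ZGC does not implement the GC overhead limit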
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

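// Satisfy a failed metadata allocation by escalating: an asynchronous GC
// followed by metaspace expansion, then a synchronous GC that also clears
// soft references, and finally one more expansion before giving up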
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

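// The request is forwarded to the driver, which decides whether the given
// cause is handled synchronously or asynchronously (see ZDriver)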
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is benign, since the only GC
  // causes that should reach this point are heap dumping and heap inspection.
  // Neither of these strictly requires a GC to happen; the result of their
  // heap iterations is merely less accurate, since it might include objects
  // that a GC would otherwise have collected.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

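// ZGC exposes a single memory manager and a single memory pool,
// both covering the entire heap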
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

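// object_iterate() and safe_object_iterate() are equivalent in ZGC;
// both visit the referent fields of reference objects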
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

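// Block queries operate on raw heap addresses, so sizes are converted
// between the byte-based ZHeap interface and HeapWord units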
HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
  size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
  return ZUtils::bytes_to_words(size_in_bytes);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

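// The runtime workers are handed out for parallel work performed
// inside safepoints, such as safepoint cleanup tasks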
WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

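// Scale the time since the last GC cycle, as reported by ZStatCycle,
// down to milliseconds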
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

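// The current capacity is reported as the committed end of the virtual
// space, and the max capacity as its reserved end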
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

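// Concurrent GC threads participate in the suspendible thread set, and are
// brought to a safe state here while a safepoint is in progress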
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}
358 }
--- EOF ---