/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

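// The constructor wires up the major ZGC components: the barrier set, the
// heap itself, the three GC-owned threads (director, driver and stat), and
// the runtime worker gang handed out via get_safepoint_workers().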
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

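// The ZHeap member is constructed (and its memory reserved) as part of this
// object, so all initialize() has to do is report whether that succeeded and
// publish the reserved address range to the rest of the VM.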
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_scavengable(oop obj) {
  return false;
}

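// A pointer is in the heap if it lies within the reserved address space and
// ZHeap maps it to an allocated page.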
bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

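// TLAB allocation. Note that min_size is effectively ignored: ZGC either
// satisfies the requested size in full or fails the allocation, so
// *actual_size is written only on success and always equals requested_size.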
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

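// Out-of-line object allocation. The gc_overhead_limit_was_exceeded flag is
// not used by ZGC and is left untouched.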
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

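// Metaspace allocation failure path: trigger an asynchronous GC and try to
// expand, then fall back to a synchronous GC that also clears soft
// references, before finally giving up and reporting out-of-memory.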
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

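// External collection requests are handed to the ZDriver thread, which runs
// the GC cycle; whether the request blocks depends on the GC cause.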
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // Neither of them actually needs a GC to happen; without one, the result of
  // their heap iterations might just be less accurate, since it can include
  // objects that would otherwise have been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

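// Both object iteration entry points map onto the same ZHeap iteration;
// visit_referents is set so that Reference referents are also visited.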
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
  size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
  return ZUtils::bytes_to_words(size_in_bytes);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

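// nmethod bookkeeping is delegated to ZNMethod, which tracks the oops
// embedded in compiled code.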
void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

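// Time since the last GC cycle completed, in milliseconds.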
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

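// Summarizes the heap address range for GC tracing: the committed end is
// derived from the current capacity and the reserved end from the maximum
// capacity, both measured in words from the start of the reserved region.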
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

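// ZGC's concurrent threads participate in the suspendible thread set, so
// entering a safepoint synchronizes (pauses) them here and leaving the
// safepoint lets them resume.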
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}