/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"

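// Returns the one ZCollectedHeap instance, asserting that the heap has been
// initialized and is of the expected kind before downcasting.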
ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

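// Members are initialized in declaration order, so the ZInitialize member
// (_initialize) runs its one-time setup after the barrier set has been
// constructed but before the heap itself is.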
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

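// _heap was constructed as part of the initializer list above; at this point
// we only check whether its setup (address space and backing reservations)
// succeeded, and register the reserved region with the shared heap code.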
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

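// A pointer is considered part of the heap only if it falls within the
// reserved address space and within a currently allocated heap page.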
bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

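// ZGC either satisfies the requested TLAB size or fails the allocation; there
// is no negotiation between min_size and requested_size, so on success
// actual_size is simply the requested size.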
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

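// Out-of-TLAB object allocation. ZGC applies no GC overhead limit, so the
// gc_overhead_limit_was_exceeded flag is never set.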
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

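// Metadata allocation failed. Escalate in stages: trigger an asynchronous GC
// and try to expand the metaspace, then force a synchronous GC that clears
// soft references, retry the allocation, and finally try one more expansion
// before reporting out-of-memory.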
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored, since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, as the
  // only GC causes expected here are heap dumping and heap inspection.
  // Neither of those actually needs a GC to happen; without one, their heap
  // iterations may just be less accurate, since they can include objects
  // that a GC would otherwise have collected.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

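// ZGC exposes a single memory manager and a single memory pool to the
// serviceability (JMX) framework.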
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

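// Both iteration entry points delegate to the same heap walk; ZGC does not
// need a weaker "safe" variant.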
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

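// nmethod bookkeeping is delegated to ZNMethod, which maintains the
// per-nmethod oop information used by ZGC's nmethod entry barriers.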
void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

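// Expose the runtime worker gang so VM operations executing at a safepoint
// can run their work in parallel.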
WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

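// Time since the last GC cycle ended, used to implement
// JVM_MaxObjectInspectionAge (e.g. by sun.rmi's GC latency handling).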
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

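// ZGC reserves a single contiguous address range; the committed end of the
// summary is approximated by the current capacity.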
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

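// ZGC's concurrent threads participate in the suspendible thread set; they
// are paused while a safepoint is in progress and released when it ends.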
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

 324 
 325 void ZCollectedHeap::print_extended_on(outputStream* st) const {
 326   _heap.print_extended_on(st);
 327 }
 328 
 329 void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
 330   _director->print_on(st);
 331   st->cr();
 332   _driver->print_on(st);
 333   st->cr();
 334   _stat->print_on(st);
 335   st->cr();
 336   _heap.print_worker_threads_on(st);
 337   _runtime_workers.print_threads_on(st);
 338 }
 339 
 340 void ZCollectedHeap::print_tracing_info() const {
 341   // Does nothing
 342 }
 343 
 344 void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
 345   _heap.verify();
 346 }
 347 
 348 bool ZCollectedHeap::is_oop(oop object) const {
 349   return CollectedHeap::is_oop(object) && _heap.is_oop(object);
 350 }