/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

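// Heap initialization. Returns JNI_ENOMEM if the underlying ZHeap failed to
// initialize, otherwise records the reserved ZGC address range as this heap's
// reserved region.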
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
                             (HeapWord*)ZAddressReservedEnd);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(obj);
}

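// Array allocation. The do_zero flag controls whether the allocator clears the
// array element area.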
oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
  return allocator.allocate();
}

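// TLAB allocation. Sizes are passed in HeapWords and converted to bytes before
// being handed to ZHeap. actual_size is only updated when the allocation succeeds.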
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

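// Out-of-TLAB object allocation. The gc_overhead_limit_was_exceeded flag is not
// updated here.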
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

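// Metaspace allocation failure handling: trigger an asynchronous GC and try to
// expand, then a synchronous GC (clearing soft references) followed by a retry,
// then expand once more before giving up and returning NULL.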
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // However, neither the heap dumper nor the heap inspector really need a GC
  // to happen, but the result of their heap iterations might in that case be
  // less accurate since they might include objects that would otherwise have
  // been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

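// Reports the heap as a single virtual space: reserved start, start + capacity,
// and start + max_capacity, expressed in HeapWords.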
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

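// Safepoint begin/end hooks: synchronize and desynchronize the suspendible
// thread set so that concurrent GC threads yield while a safepoint is in progress.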
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr( "     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr( "     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr( "     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr( "Heap");
  st->print_cr( "     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr( "     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr( "     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr( "     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr( "     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr( "Metadata Bits");
  st->print_cr( "     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr( "     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr( "     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr( "     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr( "     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);