/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zObjArrayAllocator.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"

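// Returns the ZCollectedHeap singleton, asserting that the heap has been
// initialized and that it is of the expected kind before downcasting.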
ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

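// The heap itself is set up by the ZHeap member's constructor. At this point
// we only check that its initialization succeeded and register the reserved
// address range with the verification machinery.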
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  Universe::calculate_verify_data((HeapWord*)ZAddressReservedStart,
                                  (HeapWord*)ZAddressReservedEnd);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

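// Called at VM shutdown to stop the concurrent GC threads.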
void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(obj);
}

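// TLAB allocation. ZGC always allocates exactly the requested size, so
// min_size is ignored and, on success, the actual size always equals the
// requested size.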
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

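// Array allocation. When zeroing is requested, the allocation is handed to
// ZObjArrayAllocator, which clears the array in segments so that allocating
// a very large array does not delay safepoints.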
oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  if (!do_zero) {
    return CollectedHeap::array_allocate(klass, size, length, false /* do_zero */, THREAD);
  }

  ZObjArrayAllocator allocator(klass, size, length, THREAD);
  return allocator.allocate();
}

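// Out-of-TLAB object allocation. ZGC ignores the GC overhead limit, so
// gc_overhead_limit_was_exceeded is never set.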
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

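// Try to satisfy a failed metadata allocation: first expand the metaspace
// alongside an asynchronous GC, then retry after a synchronous GC that also
// clears soft references, before finally giving up and reporting OOM.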
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be the heap dumper and the heap
  // inspector. Neither of them actually needs a GC to happen; skipping it
  // merely makes their heap iterations less accurate, since the results may
  // include objects that a GC would otherwise have collected.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

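// Expose ZGC's single memory manager and memory pool to the memory
// management API.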
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

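// Iterate over all objects in the heap, including those only reachable
// through weak references. For ZGC the safe and regular variants are
// identical.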
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

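// Report the time since the last GC cycle, as tracked by ZStat, in
// milliseconds.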
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

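// Summarize the reserved heap range: the committed end is derived from the
// current capacity and the reserved end from the maximum capacity.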
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  HeapWord* const heap_start = (HeapWord*)ZAddressReservedStart;
  return VirtualSpaceSummary(heap_start,
                             heap_start + capacity_in_words,
                             heap_start + max_capacity_in_words);
}

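// ZGC's concurrent threads participate in the suspendible thread set. They
// are stopped before a safepoint operation begins and resumed once it ends.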
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

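// Print ZGC-global state, primarily for inclusion in hs_err crash reports.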
void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

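// Describe what an address points at, for use in error reporting. For valid
// oops this includes whether the colored pointer currently carries the good
// metadata bits.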
bool ZCollectedHeap::print_location(outputStream* st, void* addr) const {
  if (LocationPrinter::is_valid_obj(addr)) {
    st->print(INTPTR_FORMAT " is a %s oop: ", p2i(addr),
              ZAddress::is_good(reinterpret_cast<uintptr_t>(addr)) ? "good" : "bad");
    cast_to_oop(addr)->print_on(st);
    return true;
  }
  return false;
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}

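// Assert that an address is a plausible oop location: properly aligned and
// within ZGC's reserved address space.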
void ZCollectedHeap::check_oop_location(void* addr) const {
  assert(check_obj_alignment(addr), "address is not aligned");

  const uintptr_t addr_int = reinterpret_cast<uintptr_t>(addr);
  assert(addr_int >= ZAddressSpaceStart, "address is outside of the heap");
  assert(addr_int < ZAddressSpaceEnd,    "address is outside of the heap");
}