/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayKlass.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"

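// Returns the global heap instance downcast to ZCollectedHeap. Asserts that
// the heap has been initialized and that it really is a ZCollectedHeap.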
ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  return (ZCollectedHeap*)heap;
}

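// Note: C++ guarantees that members are constructed in declaration order, so
// _initialize (which takes the barrier set) runs after _barrier_set but
// before _heap, ensuring one-time GC initialization completes before the
// heap itself is built.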
ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
                             (HeapWord*)ZAddressReservedEnd);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(obj);
}

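// TLAB sizes are passed in HeapWords and converted to bytes for the
// underlying allocator. On success, *actual_size is set to exactly the
// requested size; on failure, NULL is returned and *actual_size is left
// untouched.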
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  // To avoid delaying safepoints, the clearing of arrays is split up into
  // segments with safepoint polling in between. However, we can't have a
  // not-yet-cleared array of oops on the heap when we safepoint, since the GC
  // would then stumble across uninitialized oops. To avoid this, we let an
  // array of oops be an array of longs until the clearing has completed. Longs
  // are a valid substitute for oops because they have the same size and
  // alignment when using ZGC (i.e. when compressed oops is disabled).

  HandleMark hm;

  ArrayKlass* const temp_klass = (do_zero && klass == Universe::objectArrayKlassObj()) ?
                                 ArrayKlass::cast(Universe::longArrayKlassObj()) :
                                 ArrayKlass::cast(klass);

  // Allocate array
  ObjArrayAllocator allocator(temp_klass, size, length, false /* do_zero */, THREAD);
  Handle array(THREAD, allocator.allocate());

  if (!array.is_null() && do_zero) {
    // Clear array, one page-sized segment at a time, skipping the already
    // initialized array header
    const size_t segment_max = ZUtils::bytes_to_words(os::vm_page_size());
    const size_t skip = arrayOopDesc::header_size(temp_klass->element_type());
    size_t remaining = size - skip;

    while (remaining > 0) {
      // Clear segment
      const size_t segment = MIN2(remaining, segment_max);
      Copy::zero_to_words((HeapWord*)array() + (size - remaining), segment);
      remaining -= segment;

      // Safepoint poll: the thread state transition performed by
      // ThreadBlockInVM allows a pending safepoint to proceed
      ThreadBlockInVM tbivm((JavaThread*)THREAD);
    }

    if (klass != temp_klass) {
      // Set actual klass
      oopDesc::release_set_klass((HeapWord*)array(), klass);
    }
  }

  return array();
}

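// Raw object allocation. The size is given in HeapWords and NULL signals
// allocation failure. ZGC never sets *gc_overhead_limit_was_exceeded.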
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

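// Metadata allocation retry protocol: start an asynchronous GC and try to
// expand the metaspace; if that fails, force a synchronous GC that also
// clears soft references, then retry the allocation with and without
// expansion before giving up and returning NULL.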
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be the heap dumper and the heap
  // inspector. Neither of them really needs a GC to happen; the result of
  // their heap iterations might just be less accurate, since they might
  // include objects that a GC would otherwise have collected.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

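// Reports how long ago the last GC cycle completed, in milliseconds, as
// tracked by ZGC's statistics module.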
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

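// Note: ZGC commits memory non-contiguously within its reserved address
// space, so the committed end of the summarized virtual space is
// approximated as start + capacity.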
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

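// Safepoint begin/end hooks: suspend and resume the threads in the
// suspendible thread set (e.g. concurrent GC workers) around a safepoint.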
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}