/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"

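// Returns the ZCollectedHeap singleton. The asserts guard against calls
// made before the heap has been created and against running with a
// different GC, either of which would make the cast below invalid.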
ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

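// Note: C++ runs these initializers in member declaration order, which this
// list is assumed to match. In particular, _initialize runs after the barrier
// set is constructed but before _heap and the GC threads are created.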
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_scavengable(oop obj) {
  return false;
}

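// A pointer is considered part of the heap only if it lies within the
// reserved address space and ZHeap also recognizes it, i.e. it maps to
// an allocated page.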
bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

void ZCollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  // Does nothing, not a parsable heap
}

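// The requested TLAB size is given in words, rounded up to the object
// alignment and handed to ZHeap in bytes. On success, *actual_size is set
// to the requested size, since ZGC never returns a differently sized TLAB.
// On failure, NULL is returned and *actual_size is left untouched.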
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

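// Called after a metadata allocation has failed. The strategy escalates:
// start an asynchronous GC and try to expand the metaspace, then run a
// synchronous GC that also clears soft references and retry the allocation,
// and finally expand and retry once more. Returning NULL here makes the
// caller report an out-of-memory condition for metaspace.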
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // Neither the heap dumper nor the heap inspector really needs a GC to
  // happen, but the result of their heap iterations might in that case be
  // less accurate, since they might include objects that would otherwise
  // have been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

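// TLAB statistics are delegated to ZHeap. The Thread arguments are unused,
// since ZHeap reports heap-global values rather than per-thread ones.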
bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

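// ZGC exposes a single memory manager and a single memory pool to the
// memory management API (java.lang.management), both provided by the
// serviceability support in ZHeap.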
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

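// Block queries. ZHeap operates on untyped uintptr_t addresses and sizes
// in bytes, while the CollectedHeap interface uses HeapWord* and word
// sizes, so these functions merely translate between the two.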
HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
  const size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
  return ZUtils::bytes_to_words(size_in_bytes);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::unregister_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

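// Time since the end of the last GC cycle, converted to milliseconds for
// the memory management API.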
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

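// Summarizes the heap address space: the committed portion ends at
// start + capacity() and the reserved portion at start + max_capacity(),
// both expressed in HeapWords.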
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}