/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

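// Singleton instance, installed once by the ZHeap constructor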
ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(),
    _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  return MinHeapSize;
}

size_t ZHeap::heap_initial_size() const {
  return InitialHeapSize;
}

size_t ZHeap::heap_max_size() const {
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

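// Note: this only records the out-of-memory event for logging and
// statistics. Handling the failed allocation itself (e.g. returning
// NULL to the caller) is assumed to be done by the allocation path
// that calls this.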
void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

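// Note: used to return a page that turned out not to be needed, for
// example when another thread won a race to install a shared page.
// The page must still be in the allocating state.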
void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

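// Note: 'delay' is assumed to be the uncommit delay in seconds, and the
// returned value the time until the page allocator wants to be asked to
// uncommit again.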
uint64_t ZHeap::uncommit(uint64_t delay) {
  return _page_allocator.uncommit(delay);
}

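// ZGC maps the heap in multiple views (marked0, marked1 and remapped) of
// the same physical memory. Flipping the address view changes which view
// is currently "good", so that load barriers will trap pointers colored
// for the previous phase.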
void ZHeap::flip_to_marked() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_marked();
}

void ZHeap::flip_to_remapped() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_remapped();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

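// Note: deferred delete is enabled below so that pages freed while the
// page table is being iterated are not deleted (and their memory
// potentially reused) until the iteration is done.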
void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl, visit_weaks);
}

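// Note: visits both the pages in the page table and any pages known only
// to the page allocator.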
void ZHeap::pages_do(ZPageClosure* cl) {
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    cl->do_page(page);
  }
  _page_allocator.pages_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  st->cr();
}

bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
  if (LocationPrinter::is_valid_obj((void*)addr)) {
    st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad");
    ZOop::from_address(addr)->print_on(st);
    return true;
  }

  return false;
}

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}