/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  return MinHeapSize;
}

size_t ZHeap::heap_initial_size() const {
  return InitialHeapSize;
}

size_t ZHeap::heap_max_size() const {
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
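  // For example, with 4 workers, 2M small pages and a 32M medium page (typical
  // defaults), the reserve would be 4 * 2M + 32M = 40M.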
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}

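// Used by the CollectedHeap block API. The given address is expected to
// point into the heap, so the page table lookup is assumed to succeed.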
uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_start(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
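  // Thread::current()->name() below may return a resource-allocated
  // string, hence the ResourceMark.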
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

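// Uncommit memory that has been unused for longer than the given delay.
// Returns the timeout until uncommit should be attempted again.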
uint64_t ZHeap::uncommit(uint64_t delay) {
  return _page_allocator.uncommit(delay);
}

void ZHeap::before_flip() {
  if (ZVerifyViews) {
    // Unmap all pages
    _page_allocator.debug_unmap_all_pages();
  }
}

void ZHeap::after_flip() {
  if (ZVerifyViews) {
    // Map all pages
    ZPageTableIterator iter(&_page_table);
    for (ZPage* page; iter.next(&page);) {
      _page_allocator.debug_map_page(page);
    }
    _page_allocator.debug_map_cached_pages();
  }
}

void ZHeap::flip_to_marked() {
  before_flip();
  ZAddress::flip_to_marked();
  after_flip();
}

void ZHeap::flip_to_remapped() {
  before_flip();
  ZAddress::flip_to_remapped();
  after_flip();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}

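// Controls how aggressively soft references are cleared during this GC
// cycle. When 'clear' is true, all soft references are eligible for
// clearing, regardless of how recently their referents were accessed.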
void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

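// Task for freeing reclaimable pages (pages without any live objects)
// in parallel, using the GC worker threads.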
class ZHeapReclaimPagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZHeapReclaimPagesTask(ZArray<ZPage*>* pages) :
      ZTask("ZHeapReclaimPagesTask"),
      _iter(pages) {}

  virtual void work() {
    for (ZPage* page; _iter.next(&page);) {
      ZHeap::heap()->free_page(page, true /* reclaimed */);
    }
  }
};

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
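  // while the page table is being iterated below. Pages freed concurrently
  // by other threads are instead kept alive until deferred delete is
  // disabled again.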
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register reclaimable page
      selector.register_reclaimable_page(page);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Free reclaimable pages
  ZHeapReclaimPagesTask task(selector.reclaimable());
  _workers.run_concurrent(&task);

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl, visit_weaks);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  st->cr();
}

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}