/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  return MinHeapSize;
}

size_t ZHeap::heap_initial_size() const {
  return InitialHeapSize;
}

size_t ZHeap::heap_max_size() const {
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

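// A TLAB is never larger than the small object size limit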
size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_start(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

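// Record an out-of-memory event: bump the counter and log the allocating
// thread. The thread name may be resource allocated, hence the ResourceMark.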
void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

uint64_t ZHeap::uncommit(uint64_t delay) {
  return _page_allocator.uncommit(delay);
}

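// When ZVerifyViews is enabled, all pages are unmapped before an address
// view flip and remapped after it, as a debugging aid intended to catch
// accesses through a stale heap view.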
void ZHeap::before_flip() {
  if (ZVerifyViews) {
    // Unmap all pages
    _page_allocator.debug_unmap_all_pages();
  }
}

void ZHeap::after_flip() {
  if (ZVerifyViews) {
    // Map all pages
    ZPageTableIterator iter(&_page_table);
    for (ZPage* page; iter.next(&page);) {
      _page_allocator.debug_map_page(page);
    }
    _page_allocator.debug_map_cached_pages();
  }
}

void ZHeap::flip_to_marked() {
  before_flip();
  ZAddress::flip_to_marked();
  after_flip();
}

void ZHeap::flip_to_remapped() {
  before_flip();
  ZAddress::flip_to_remapped();
  after_flip();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

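// Helpers for fixup_partial_loads(). They rescan the thread stacks and
// apply the mark barrier to root oops, see the comment in mark_end().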
class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZFixupPartialLoadsClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  st->cr();
}

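// Task that applies the verification closure to all strong and weak roots
// in parallel, used by ZHeap::verify() below.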
class ZVerifyRootsTask : public ZTask {
private:
  ZStatTimerDisable  _disable;
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _disable(),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZStatTimerDisable disable;
    ZVerifyOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl, false /* visit_referents */);
  }
}