/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZGranuleSize);
  return MIN2(aligned_min_size, heap_max_size());
}

size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZGranuleSize);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
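  // (For example, assuming the default ZGC page sizes of 2M for small pages and
  //  32M for medium pages, a run with 4 workers would reserve 4 * 2M + 32M = 40M,
  //  capped below at the maximum heap size.)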
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _page_table.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_start(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
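  // A ResourceMark is needed here since Thread::name() below may return a
  // string allocated in the current thread's resource area.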
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

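// Note: ZVerifyViews is a diagnostic flag. When it is enabled, all pages are
// unmapped before the address view is flipped and remapped afterwards,
// presumably so that an access through a stale view faults instead of silently
// reading through the wrong mapping.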
void ZHeap::before_flip() {
  if (ZVerifyViews) {
    // Unmap all pages
    _page_allocator.unmap_all_pages();
  }
}

void ZHeap::after_flip() {
  if (ZVerifyViews) {
    // Map all pages
    ZPageTableIterator iter(&_page_table);
    for (ZPage* page; iter.next(&page);) {
      _page_allocator.map_page(page);
    }
  }
}

void ZHeap::flip_to_marked() {
  before_flip();
  ZAddressMasks::flip_to_marked();
  after_flip();
}

void ZHeap::flip_to_remapped() {
  before_flip();
  ZAddressMasks::flip_to_remapped();
  after_flip();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZFixupPartialLoadsClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
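  // (Deferring deletion presumably keeps ZPage objects that are freed while the
  //  page table is iterated below alive until the iteration is done, see
  //  ZPageAllocator::enable/disable_deferred_delete().)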
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  st->cr();
}

class ZVerifyRootsTask : public ZTask {
private:
  ZStatTimerDisable  _disable;
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _disable(),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZStatTimerDisable disable;
    ZVerifyOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl, false /* visit_referents */);
  }
}