/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

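// Minimum heap size: the initial heap size aligned up to the Z granule size,
// capped by the maximum heap size.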
size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZGranuleSize);
  return MIN2(aligned_min_size, heap_max_size());
}

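// Maximum heap size: the requested maximum heap size aligned up to the Z
// granule size, capped by ZAddressOffsetMax, the largest heap offset that
// can be encoded in a ZGC address.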
size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZGranuleSize);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

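// An address is in the heap if it lies within the reserved heap address range
// and is covered by a page currently registered in the page table.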
bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

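// Record an out-of-memory event. The thread name used in the log message may
// be resource allocated, hence the ResourceMark.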
void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

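// Page lifetimes are reference counted. retain_page() attempts to take a
// reference on a page and returns whether it succeeded, while release_page()
// drops a reference and returns the page to the page allocator once the last
// reference has been released.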
bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}

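// When ZVerifyViews is enabled, all pages are unmapped before the global
// address view is flipped and remapped afterwards, so that an access through
// the wrong view is caught as a fault rather than going unnoticed.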
void ZHeap::before_flip() {
  if (ZVerifyViews) {
    // Unmap all pages
    _page_allocator.unmap_all_pages();
  }
}

void ZHeap::after_flip() {
  if (ZVerifyViews) {
    // Map all pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.map_page(page);
      }
    }
  }
}

void ZHeap::flip_to_marked() {
  before_flip();
  ZAddressMasks::flip_to_marked();
  after_flip();
}

void ZHeap::flip_to_remapped() {
  before_flip();
  ZAddressMasks::flip_to_remapped();
  after_flip();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

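// Flush and free the given thread's local mark stacks.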
void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

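  // ZGC does not use compressed oops, so narrowOop roots should never occur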
  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZFixupPartialLoadsClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

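// Flush out pages whose memory has been detached by the page allocator,
// remove their page table entries, and destroy the page objects.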
void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

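// Relocate the object at the given address and return its new address. The
// page is retained across the relocation so that it is not freed while in
// use, and released again afterwards if the retain succeeded.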
uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}

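// Look up the address to which an object has been forwarded, without
// performing any relocation. Only valid during marking, see the assert below.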
uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  st->cr();
}

class ZVerifyRootsTask : public ZTask {
private:
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZVerifyOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl, false /* visit_referents */);
  }
}