/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  return MIN2(aligned_min_size, heap_max_size());
}

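// The heap is limited in size by ZAddressOffsetMax, since it must fit
// within the address range that the offset part of a colored pointer
// can cover.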
size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

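// TLABs are always allocated in small pages, so the TLAB size can never
// exceed the small object size limit.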
size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

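// An address is part of the heap if it is inside the reserved address
// range and covered by a page that contains it.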
bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

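// Block API, used by the VM for heap walking and inspection. Callers
// must pass an address known to be inside the heap, so the page lookup
// cannot fail.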
uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

size_t ZHeap::block_size(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_size(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

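// Pages are reference counted so they can be kept alive while threads
// are operating on them, e.g. while relocating objects. A page is handed
// back to the page allocator only when its last reference is released.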
bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}

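// When ZUnmapBadViews is enabled, the heap mappings are adjusted after
// each flip of the global address masks, so that accesses through stale
// (bad) views fault instead of silently reading stale data.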
void ZHeap::flip_views() {
  // For debugging only
  if (ZUnmapBadViews) {
    // Flip pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.flip_page(page);
      }
    }

    // Flip pre-mapped memory
    _page_allocator.flip_pre_mapped();
  }
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

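// Closure and task used by mark_end() to rescan the thread stacks and
// apply the mark barrier to root oops. See the comment in mark_end()
// for why this is needed.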
class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZFixupPartialLoadsClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Verification
  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
    Universe::verify();
  }

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

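// A freed ZPage object can't be deleted immediately, since concurrent
// threads might still be accessing it. Freed pages are instead kept on
// a detached list by the page allocator and destroyed here, once it's
// safe to do so.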
void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

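// Garbage pages, i.e. pages without any live objects, are reclaimed
// immediately, while live pages are registered with the selector, which
// picks the subset of pages worth evacuating into the relocation set.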
void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());

  // Flip address view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

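// Relocate the object at the given address and return its new address.
// The page is retained while relocating, to prevent it from being freed
// while we're operating on it.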
uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}

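// During marking, objects on pages in the previous cycle's relocation
// set have already been relocated, so finding the new address is just a
// forwarding table lookup.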
uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

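// Iterate over all objects in the heap, e.g. for heap dumping and
// inspection. Must be called at a safepoint, so that the object graph
// is stable. visit_referents controls whether the referent fields of
// Reference objects are also followed.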
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  st->cr();
}

class ZVerifyRootsTask : public ZTask {
private:
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZVerifyRootOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl, false /* visit_referents */);
  }
}