/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler  ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler  ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler  ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler  ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter  ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter  ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _initialize(),
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  return MIN2(aligned_min_size, heap_max_size());
}

size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
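  // As a purely illustrative example (the actual values come from ZPageSizeSmall,
  // ZPageSizeMedium and the configured worker count): with 8 workers, 2M small
  // pages and a 32M medium page, the reserve would be (8 * 2M) + 32M = 48M,
  // capped at heap_max_size().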
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
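    // As an illustrative example (sizes are hypothetical): if remaining()
    // reports 100K but MinTLABSize is 256K, we report max_tlab_size() rather
    // than 100K, since the next TLAB will be allocated from a fresh page.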
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

size_t ZHeap::block_size(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_size(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

ZPageTableEntry* ZHeap::pagetable_addr() const {
  return _pagetable.addr();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}
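
// A minimal sketch of the retain/release pattern used by relocate_object()
// below: retain_page()/release_page() implement simple reference counting on
// pages, and the page is freed once the last reference has been dropped.
//
//   ZPage* const page = _pagetable.get(addr);
//   const bool retained = retain_page(page);
//   // ... access the page ...
//   if (retained) {
//     release_page(page, true /* reclaimed */);
//   }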

void ZHeap::flip_views() {
  // For debugging only
  if (ZUnmapBadViews) {
    // Flip pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.flip_page(page);
      }
    }

    // Flip pre-mapped memory
    _page_allocator.flip_pre_mapped();
  }
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Retire TLABs
  _object_allocator.retire_tlabs();

  // Flip address view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark() {
  _mark.mark();
}

void ZHeap::mark_flush_and_free() {
  _mark.flush_and_free();
}

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Verification
  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
    Universe::verify();
  }

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process and enqueue Soft/Weak/Final/PhantomReferences
  _reference_processor.process_and_enqueue_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();
}

void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());

  // Flip address view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Remap TLABs
  _object_allocator.remap_tlabs();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}

uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl) {
  // Should only be called at a safepoint after mark end.
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceAux::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  st->cr();
}

class ZVerifyRootsTask : public ZTask {
private:
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZVerifyRootOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl);
  }
}