/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler  ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler  ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler  ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler  ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter  ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter  ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
static const ZStatSubPhase ZPhaseConcurrentReferencesProcessing("Concurrent References Processing");
static const ZStatSubPhase ZPhaseConcurrentWeakRootsProcessing("Concurrent Weak Roots Processing");

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _initialize(),
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  return MIN2(aligned_min_size, heap_max_size());
}

size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

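// Returns true only if the address lies within the reserved heap range and
// is backed by a page that contains it.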
bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

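// Block queries, each forwarded to the page backing the given address.
// Unlike is_in(), these assume the address maps to a valid page.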
uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

size_t ZHeap::block_size(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_size(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

ZPageTableEntry* ZHeap::pagetable_addr() const {
  return _pagetable.addr();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

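// Called when a heap allocation fails. Records the event in the statistics
// and the GC log.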
void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

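// Page reference counting. A page is retained while it is being accessed,
// for example during object relocation, and released again afterwards. When
// the last reference is released the page is handed back to the page
// allocator, optionally counting its memory as reclaimed.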
bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}

void ZHeap::flip_views() {
  // For debugging only
  if (ZUnmapBadViews) {
    // Flip pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.flip_page(page);
      }
    }

    // Flip pre-mapped memory
    _page_allocator.flip_pre_mapped();
  }
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Retire TLABs
  _object_allocator.retire_tlabs();

  // Flip address view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark() {
  _mark.mark();
}

void ZHeap::mark_flush_and_free() {
  _mark.flush_and_free();
}

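// Task that rescans all thread stacks and marks any oops found there. See
// the comment in ZHeap::mark_end() for why this is needed.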
class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Clean weak roots
  _weak_roots_processor.process_weak_roots();

  // Verification
  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
    Universe::verify();
  }

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

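// Process discovered references and concurrent weak roots, then allow
// resurrection of weak/phantom references again.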
void ZHeap::concurrent_weak_processing() {
  {
    ZStatTimer timer(ZPhaseConcurrentReferencesProcessing);
    _reference_processor.process_and_enqueue_references();
  }

  {
    ZStatTimer timer(ZPhaseConcurrentWeakRootsProcessing);
    _weak_roots_processor.process_concurrent_weak_roots();
  }

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();
}

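// Flush all pages that have been detached by the page allocator, remove
// them from the pagetable and destroy them.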
void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());

  // Flip address view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Remap TLABs
  _object_allocator.remap_tlabs();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

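// Relocate the object at the given address and return its new address. The
// containing page is retained, if possible, while the object is relocated.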
uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}

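// Return the forwarded address of an already relocated object, without
// relocating it.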
uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl) {
  // Should only be called in a safepoint after mark end.
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceAux::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  st->cr();
}

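// Task that applies the verification closure to all strong and weak roots.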
class ZVerifyRootsTask : public ZTask {
private:
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZVerifyRootOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl);
  }
}