/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(),
    _page_allocator(&_workers, heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  return MinHeapSize;
}

size_t ZHeap::heap_initial_size() const {
  return InitialHeapSize;
}

size_t ZHeap::heap_max_size() const {
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

// An adapter that allows an AbstractGangTask to run as a ZTask on
// the ZGC worker threads.
class DelegatedZAbstractGangTask : public ZTask {
 private:
  AbstractGangTask* _task;

 public:
  DelegatedZAbstractGangTask(AbstractGangTask* task) :
      ZTask(task->name()),
      _task(task) { }

  virtual void work() {
    _task->work(ZThread::worker_id());
  }
};

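// Run the given task on the ZGC worker threads, wrapped in the adapter
// above, and return the time it took to run. Usage sketch (MyGangTask is
// a hypothetical AbstractGangTask subclass):
//
//   MyGangTask task;
//   const Tickspan duration = ZHeap::heap()->run_task(&task);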
Tickspan ZHeap::run_task(AbstractGangTask* task) {
  const Ticks start = Ticks::now();
  DelegatedZAbstractGangTask dtask(task);
  _workers.run_parallel(&dtask);
  return Ticks::now() - start;
}

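// Count and log an out-of-memory event. The allocation failure itself is
// handled by the caller.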
void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

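// Allocate a new page and, on success, insert it into the page table so
// that it becomes visible to the rest of the GC.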
ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

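// Uncommit memory that has been unused for at least "delay" seconds and
// return the time until uncommit should be attempted again.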
uint64_t ZHeap::uncommit(uint64_t delay) {
  return _page_allocator.uncommit(delay);
}

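// Flip the global address view. When -XX:+ZVerifyViews is enabled,
// ZVerifyViewsFlip also unmaps all pages before the flip and remaps them
// afterwards, so that only the current good view is mapped.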
void ZHeap::flip_to_marked() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_marked();
}

void ZHeap::flip_to_remapped() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_remapped();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload stale metadata and nmethods
  _unload.prepare();

  return true;
}

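// Apply the keep-alive barrier to obj, so that it is treated as live by
// the current GC cycle.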
void ZHeap::keep_alive(oop obj) {
  ZBarrier::keep_alive_barrier_on_oop(obj);
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

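// A handshake closure with an empty operation. Merely forcing every Java
// thread through a handshake provides the synchronization we need (see the
// comment in ZHeap::process_non_strong_references() below).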
class ZRendezvousClosure : public HandshakeClosure {
public:
  ZRendezvousClosure() :
      HandshakeClosure("ZRendezvous") {}

  void do_thread(Thread* thread) {}
};

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unlink stale metadata and nmethods
  _unload.unlink();

  // Perform a handshake. This is needed 1) to make sure that stale
  // metadata and nmethods are no longer observable. And 2), to
  // prevent the race where a mutator first loads an oop, which is
  // logically null but not yet cleared. Then this oop gets cleared
  // by the reference processor and resurrection is unblocked. At
  // this point the mutator could see the unblocked state and pass
  // this invalid oop through the normal barrier path, which would
  // incorrectly try to mark the oop.
  ZRendezvousClosure cl;
  Handshake::execute(&cl);

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Purge stale metadata and nmethods that were unlinked
  _unload.purge();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.stats());
  ZStatHeap::set_at_select_relocation_set(selector.stats(), reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading stale metadata and nmethods
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

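// Iterate over all objects in the heap. When visit_weaks is true, objects
// reachable only through weak roots are also visited.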
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl, visit_weaks);
}

void ZHeap::pages_do(ZPageClosure* cl) {
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    cl->do_page(page);
  }
  _page_allocator.pages_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  st->cr();
}

bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
  if (LocationPrinter::is_valid_obj((void*)addr)) {
    st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad");
    ZOop::from_address(addr)->print_on(st);
    return true;
  }

  return false;
}

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}