/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(),
    _page_allocator(&_workers, heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
}

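// Note: the minimum and initial heap sizes below are clamped to be at least
// the maximum reserve, so the reserve always fits within the smallest heap
// size the VM may end up using.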
size_t ZHeap::heap_min_size() const {
  return MAX2(MinHeapSize, heap_max_reserve_size());
}

size_t ZHeap::heap_initial_size() const {
  return MAX2(InitialHeapSize, heap_max_reserve_size());
}

size_t ZHeap::heap_max_size() const {
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
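  // As a rough illustration (assuming the typical 2M small page and 32M
  // medium page sizes), a configuration with 4 workers would reserve
  // 4 * 2M + 32M = 40M, capped at the maximum heap size.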
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

uint64_t ZHeap::uncommit() {
  return _page_allocator.uncommit();
}

void ZHeap::uncommit_cancel() {
  return _page_allocator.uncommit_cancel();
}

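// Note: flipping the address view changes which heap view (marked or
// remapped) the load barriers consider "good". The ZVerifyViewsFlip scope
// brackets the flip for the page allocator's optional view verification.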
void ZHeap::flip_to_marked() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_marked();
}

void ZHeap::flip_to_remapped() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_remapped();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload stale metadata and nmethods
  _unload.prepare();

  return true;
}

void ZHeap::keep_alive(oop obj) {
  ZBarrier::keep_alive_barrier_on_oop(obj);
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

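// Empty closure used only as a rendezvous point with all Java threads; the
// handshake operation itself provides the required synchronization (see the
// comment in process_non_strong_references() below), so no per-thread work
// is done here.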
class ZRendezvousClosure : public HandshakeClosure {
public:
  ZRendezvousClosure() :
      HandshakeClosure("ZRendezvous") {}

  void do_thread(Thread* thread) {}
};

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unlink stale metadata and nmethods
  _unload.unlink();

  // Perform a handshake. This is needed 1) to make sure that stale
  // metadata and nmethods are no longer observable. And 2), to
  // prevent the race where a mutator first loads an oop, which is
  // logically null but not yet cleared. Then this oop gets cleared
  // by the reference processor and resurrection is unblocked. At
  // this point the mutator could see the unblocked state and pass
  // this invalid oop through the normal barrier path, which would
  // incorrectly try to mark the oop.
  ZRendezvousClosure cl;
  Handshake::execute(&cl);

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Purge stale metadata and nmethods that were unlinked
  _unload.purge();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.stats());
  ZStatHeap::set_at_select_relocation_set(selector.stats(), reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading stale metadata and nmethods
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl, visit_weaks);
}

void ZHeap::pages_do(ZPageClosure* cl) {
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    cl->do_page(page);
  }
  _page_allocator.pages_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  st->cr();
}

bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
  if (LocationPrinter::is_valid_obj((void*)addr)) {
    st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad");
    ZOop::from_address(addr)->print_on(st);
    return true;
  }

  return false;
}

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}