/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(),
    _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize, ZHeuristics::max_reserve()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(min_capacity(), max_capacity()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(min_capacity(), max_capacity(), max_reserve());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}
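
// Worked example of the sizing rule above (illustrative only; the concrete
// values are assumptions, not taken from this file): with MinTLABSize = 2K
// and a small-object size limit of 256K, a remaining() of 1K falls below
// MinTLABSize, so the next TLAB will go into a fresh backing page and we
// report max_tlab_size(); a remaining() of 512K is simply clamped down to
// max_tlab_size() by the MIN2 above.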

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}
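
// Illustrative sketch (hedged: ZAddress::good() and ZAddress::finalizable_good()
// are assumed helpers from zAddress.hpp, not used in this file): the finalizable
// metadata bit keeps an address outside every heap view, so is_in() reports
// false for it even when the corresponding object is covered by a page.
//
//   const uintptr_t colored     = ZAddress::good(offset);             // may be "in the heap"
//   const uintptr_t finalizable = ZAddress::finalizable_good(offset); // never "in the heap"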

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}
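
// Minimal caller sketch (hypothetical; the page type and size constants are
// assumed to come from zGlobals.hpp and are not referenced by this file):
// allocate a small page and hand it back via undo_alloc_page() if it turns
// out not to be needed.
//
//   ZAllocationFlags flags;
//   ZPage* const page = ZHeap::heap()->alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
//   if (page != NULL && !needed) {
//     ZHeap::heap()->undo_alloc_page(page);
//   }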

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

void ZHeap::unmap_run() {
  _page_allocator.unmap_run();
}

void ZHeap::unmap_stop() {
  _page_allocator.unmap_stop();
}

void ZHeap::uncommit_run() {
  _page_allocator.uncommit_run();
}

void ZHeap::uncommit_stop() {
  _page_allocator.uncommit_stop();
}

void ZHeap::flip_to_marked() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_marked();
}

void ZHeap::flip_to_remapped() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_remapped();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload stale metadata and nmethods
  _unload.prepare();

  return true;
}

void ZHeap::keep_alive(oop obj) {
  ZBarrier::keep_alive_barrier_on_oop(obj);
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

class ZRendezvousClosure : public HandshakeClosure {
public:
  ZRendezvousClosure() :
      HandshakeClosure("ZRendezvous") {}

  // Intentionally empty. The rendezvous only needs each thread to pass a
  // handshake poll; no per-thread work is required.
  void do_thread(Thread* thread) {}
};

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unlink stale metadata and nmethods
  _unload.unlink();

  // Perform a handshake. This is needed 1) to make sure that stale
  // metadata and nmethods are no longer observable, and 2) to prevent
  // the race where a mutator first loads an oop that is logically null
  // but not yet cleared. That oop could then be cleared by the reference
  // processor and resurrection unblocked, at which point the mutator
  // could see the unblocked state and pass the now invalid oop through
  // the normal barrier path, which would incorrectly try to mark it.
  ZRendezvousClosure cl;
  Handshake::execute(&cl);

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Purge stale metadata and nmethods that were unlinked
  _unload.purge();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.stats());
  ZStatHeap::set_at_select_relocation_set(selector.stats(), reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading stale metadata and nmethods
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl, visit_weaks);
}

void ZHeap::pages_do(ZPageClosure* cl) {
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    cl->do_page(page);
  }
  _page_allocator.pages_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted again
  _page_allocator.disable_deferred_delete();

  st->cr();
}

bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
  if (LocationPrinter::is_valid_obj((void*)addr)) {
    st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad");
    ZOop::from_address(addr)->print_on(st);
    return true;
  }

  return false;
}

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}