/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/locationPrinter.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(),
    _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize, ZHeuristics::max_reserve()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(min_capacity(), max_capacity()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(min_capacity(), max_capacity(), max_reserve());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
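  // The largest TLAB is capped to the small object size limit, so
  // that a TLAB always fits in a small page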
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::threads_do(ThreadClosure* tc) const {
  _page_allocator.threads_do(tc);
  _workers.threads_do(tc);
}

// Adapter class from AbstractGangTask to ZTask
class ZAbstractGangTaskAdapter : public ZTask {
private:
  AbstractGangTask* _task;

public:
  ZAbstractGangTaskAdapter(AbstractGangTask* task) :
      ZTask(task->name()),
      _task(task) { }

  virtual void work() {
    _task->work(ZThread::worker_id());
  }
};

void ZHeap::run_task(AbstractGangTask* task) {
  ZAbstractGangTaskAdapter ztask(task);
  _workers.run_parallel(&ztask);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

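  // Free the page without counting it as reclaimed, since this is an
  // allocation being undone rather than garbage being collected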
  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

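// Flipping the address view switches which address metadata (color) bit
// the load barrier considers good: one of the marked bits during marking
// and the remapped bit during relocation.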
void ZHeap::flip_to_marked() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_marked();
}

void ZHeap::flip_to_remapped() {
  ZVerifyViewsFlip flip(&_page_allocator);
  ZAddress::flip_to_remapped();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
}

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload stale metadata and nmethods
  _unload.prepare();

  return true;
}

void ZHeap::keep_alive(oop obj) {
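  // Apply a keep-alive barrier so that a concurrently running mark
  // cycle treats the object as live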
  ZBarrier::keep_alive_barrier_on_oop(obj);
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

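// Handshake closure with an empty do_thread(). Executing it simply forces
// a rendezvous with all Java threads; the synchronization itself is what
// matters (see process_non_strong_references below).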
class ZRendezvousClosure : public HandshakeClosure {
public:
  ZRendezvousClosure() :
      HandshakeClosure("ZRendezvous") {}

  void do_thread(Thread* thread) {}
};

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unlink stale metadata and nmethods
  _unload.unlink();

  // Perform a handshake. This is needed 1) to make sure that stale
  // metadata and nmethods are no longer observable. And 2), to
  // prevent the race where a mutator first loads an oop, which is
  // logically null but not yet cleared. Then this oop gets cleared
  // by the reference processor and resurrection is unblocked. At
  // this point the mutator could see the unblocked state and pass
  // this invalid oop through the normal barrier path, which would
  // incorrectly try to mark the oop.
  ZRendezvousClosure cl;
  Handshake::execute(&cl);

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Purge stale metadata and nmethods that were unlinked
  _unload.purge();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.stats());
  ZStatHeap::set_at_select_relocation_set(selector.stats(), reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading stale metadata and nmethods
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl, visit_weaks);
}

void ZHeap::pages_do(ZPageClosure* cl) {
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    cl->do_page(page);
  }
  _page_allocator.pages_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  st->print_cr("ZGC Page Table:");
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();
}

bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
  if (LocationPrinter::is_valid_obj((void*)addr)) {
    st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad");
    ZOop::from_address(addr)->print_on(st);
    return true;
  }

  return false;
}

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}