/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}
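
// The singleton installed above is retrieved elsewhere via the static
// ZHeap::heap() accessor (see zHeap.inline.hpp). A minimal sketch of a
// hypothetical call site:
//
//   size_t bytes_in_use = ZHeap::heap()->used();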

size_t ZHeap::heap_min_size() const {
  return MinHeapSize;
}

size_t ZHeap::heap_initial_size() const {
  return InitialHeapSize;
}

size_t ZHeap::heap_max_size() const {
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}
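
// A worked example of the logic above, using illustrative numbers only
// (say MinTLABSize = 2K and max_tlab_size() = 256K): a remaining() of 1K
// is reported as 256K, since the next TLAB allocation will get a fresh
// backing page anyway, while a remaining() of 100K is reported as-is,
// because MIN2(100K, 256K) = 100K.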

bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart || addr >= ZAddressReservedEnd) {
    return false;
  }

  const ZPage* const page = _page_table.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_start(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Insert page table entry
    _page_table.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  free_page(page, false /* reclaimed */);
}

void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}

uint64_t ZHeap::uncommit(uint64_t delay) {
  return _page_allocator.uncommit(delay);
}

void ZHeap::before_flip() {
  if (ZVerifyViews) {
    // Unmap all pages
    _page_allocator.debug_unmap_all_pages();
  }
}

void ZHeap::after_flip() {
  if (ZVerifyViews) {
    // Map all pages
    ZPageTableIterator iter(&_page_table);
    for (ZPage* page; iter.next(&page);) {
      _page_allocator.debug_map_page(page);
    }
    _page_allocator.debug_map_cached_pages();
  }
}

void ZHeap::flip_to_marked() {
  before_flip();
  ZAddress::flip_to_marked();
  after_flip();
}

void ZHeap::flip_to_remapped() {
  before_flip();
  ZAddress::flip_to_remapped();
  after_flip();
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}
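
// For orientation, the GC driver (zDriver.cpp) is expected to sequence the
// phase functions in this file roughly as follows per cycle. This is a
// sketch inferred from the functions below, not the driver's exact logic;
// stop-the-world pauses are marked with *:
//
//   mark_start()*                     // flip to marked view, mark roots
//   mark(true /* initial */)          // concurrent marking
//   mark_end()*                       // may return false -> keep marking
//   process_non_strong_references()   // concurrent reference processing
//   reset_relocation_set()            // concurrent
//   verify()*                         // optional, only valid in this window
//   select_relocation_set()           // concurrent
//   relocate_start()*                 // flip to remapped view
//   relocate()                        // concurrent relocation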

void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZFixupPartialLoadsClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}
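
// The ordering constraint described above, restated as a concrete
// (hypothetical) interleaving:
//
//   1. enqueue_references() puts a Finalizer on the pending list
//   2. the Finalizer thread calls Reference.get() on it
//   3. if resurrection were still blocked, the load barrier would treat
//      the finalizable-marked referent as dead and return null
//
// Calling ZResurrection::unblock() first makes step 3 return the
// referent instead, which is why unblock precedes enqueue.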

void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}
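
// Example of the line print_on() emits (illustrative values; a Metaspace
// summary from MetaspaceUtils::print_on() follows it):
//
//    ZHeap used 128M, capacity 512M, max capacity 4096M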

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  st->cr();
}

class ZVerifyRootsTask : public ZTask {
private:
  ZStatTimerDisable  _disable;
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _disable(),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZStatTimerDisable disable;
    ZVerifyOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl, false /* visit_referents */);
  }
}
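
// The pattern used by ZVerifyRootsTask above (subclass ZTask, override
// work(), hand the task to the worker threads) is how this file fans work
// out in parallel. A minimal hypothetical task, for illustration only
// (ZExampleTask is not part of this file, and _workers is private to ZHeap):
//
//   class ZExampleTask : public ZTask {
//   public:
//     ZExampleTask() : ZTask("ZExampleTask") {}
//     virtual void work() {
//       // Runs once on each worker thread participating in the task
//     }
//   };
//
//   ZExampleTask task;
//   _workers.run_parallel(&task);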