/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZGranuleSize);
  return MIN2(aligned_min_size, heap_max_size());
}
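// A worked example of the sizing above, assuming the default 2M granule:
// an InitialHeapSize of 127M is aligned up to 128M, and the result is then
// clamped so that the minimum heap size never exceeds the maximum heap size.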
size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZGranuleSize);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is
  // still just an estimate and doesn't guarantee that we can't run out of
  // memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}
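// A worked example of the TLAB sizing above, assuming the default sizes:
// ZObjectSizeLimitSmall is ZPageSizeSmall / 8 = 2M / 8 = 256K. With 100K
// remaining in the current backing page, MIN2(100K, 256K) = 100K is reported.
// With less than MinTLABSize remaining, the next TLAB must come from a fresh
// backing page anyway, so the full 256K is reported instead.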
bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

size_t ZHeap::block_size(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_size(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}

void ZHeap::flip_views() {
  // For debugging only
  if (ZUnmapBadViews) {
    // Flip pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.flip_page(page);
      }
    }

    // Flip pre-mapped memory
    _page_allocator.flip_pre_mapped();
  }
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}
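// Continue concurrent marking. The driver is expected to pass initial == true
// for the first concurrent mark pass after mark_start(), and initial == false
// when mark_end() could not complete marking and another pass is needed.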
void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZFixupPartialLoadsClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}
void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}

uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}
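// Serviceability support. These accessors expose the heap through the shared
// GCMemoryManager / MemoryPool framework (JMX memory beans and performance
// counters), with the details delegated to the ZServiceability wrapper.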
"Should be at safepoint"); 520 521 ZHeapIterator iter(visit_referents); 522 iter.objects_do(cl); 523 } 524 525 void ZHeap::serviceability_initialize() { 526 _serviceability.initialize(); 527 } 528 529 GCMemoryManager* ZHeap::serviceability_memory_manager() { 530 return _serviceability.memory_manager(); 531 } 532 533 MemoryPool* ZHeap::serviceability_memory_pool() { 534 return _serviceability.memory_pool(); 535 } 536 537 ZServiceabilityCounters* ZHeap::serviceability_counters() { 538 return _serviceability.counters(); 539 } 540 541 void ZHeap::print_on(outputStream* st) const { 542 st->print_cr(" ZHeap used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M", 543 used() / M, 544 capacity() / M, 545 max_capacity() / M); 546 MetaspaceUtils::print_on(st); 547 } 548 549 void ZHeap::print_extended_on(outputStream* st) const { 550 print_on(st); 551 st->cr(); 552 553 ZPageTableIterator iter(&_pagetable); 554 for (ZPage* page; iter.next(&page);) { 555 page->print_on(st); 556 } 557 558 st->cr(); 559 } 560 561 class ZVerifyRootsTask : public ZTask { 562 private: 563 ZRootsIterator _strong_roots; 564 ZWeakRootsIterator _weak_roots; 565 566 public: 567 ZVerifyRootsTask() : 568 ZTask("ZVerifyRootsTask"), 569 _strong_roots(), 570 _weak_roots() {} 571 572 virtual void work() { 573 ZVerifyOopClosure cl; 574 _strong_roots.oops_do(&cl); 575 _weak_roots.oops_do(&cl); 576 } 577 }; 578 579 void ZHeap::verify() { 580 // Heap verification can only be done between mark end and 581 // relocate start. This is the only window where all oop are 582 // good and the whole heap is in a consistent state. 583 guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase"); 584 585 { 586 ZVerifyRootsTask task; 587 _workers.run_parallel(&task); 588 } 589 590 { 591 ZVerifyObjectClosure cl; 592 object_iterate(&cl, false /* visit_referents */); 593 } 594 }