/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       reserved_heap_size,
                       GenAlignment,
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region(heap_rs);

  PSCardTable* card_table = new PSCardTable(heap_rs.region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Make up the generations
  assert(MinOldSize <= OldSize && OldSize <= MaxOldSize, "Parameter check");
  assert(MinNewSize <= NewSize && NewSize <= MaxNewSize, "Parameter check");

  // Layout the reserved space for the generations.
  // If OldGen is allocated on nv-dimm, we need to split the reservation (this
  // is required for Windows).
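  //
  // In the common (non-heterogeneous) case this yields one contiguous
  // reservation with the old gen below the young gen:
  //
  //   low addresses                              high addresses
  //   +------------------------+------------------------+
  //   |  old gen (MaxOldSize)  | young gen (MaxNewSize) |
  //   +------------------------+------------------------+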
  ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, ParallelArguments::is_heterogeneous_heap() /* split */);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize,
      "old", 1);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis) / 1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  assert(ParallelArguments::is_heterogeneous_heap() ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (!PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  // Set up WorkGang
  _workers.initialize_workers();

  GCInitLogger::print();

  return JNI_OK;
}
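// The pools and managers created below back java.lang.management: the minor
// collector ("PS Scavenge") is charged only against the eden and survivor
// pools, while the full collector ("PS MarkSweep") is charged against all
// three pools, matching the add_pool() calls below.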
void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
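//
// Sketched at a high level, the basic policy implemented by mem_allocate()
// below is a retry loop:
//
//   loop {
//     with Heap_lock: read total_collections(); try young gen, then old gen
//     if GCLocker is active: stall outside any JNI critical section, retry
//     submit VM_ParallelGCFailedAllocation(size, gc_count) to the VM thread
//     if it ran: return its result, or NULL once the GC overhead limit and
//                soft-ref clearing both indicate we should give up
//   }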

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // Retry and/or stall as necessary.
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return a NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
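// Note on the death-march path above: once a march has started,
// mem_allocate_old_gen() serves at most 64 consecutive requests straight
// from the old gen before resetting the count and returning NULL, which
// forces the next request back through a full collection.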
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // The do_full_collection() parameter clear_all_soft_refs
  // is interpreted here as maximum_compaction which will
  // cause SoftRefs to be cleared.
  bool maximum_compaction = clear_all_soft_refs;
  PSParallelCompact::invoke(maximum_compaction);
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here; that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but has no collection-specific policy.
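//
// The fallback ladder below is, in order:
//   1. scavenge (which may itself escalate to a full gc), then young gen
//   2. full collection without clearing soft refs, then young gen
//   3. old gen
//   4. full collection with maximum compaction (clears soft refs), young gen
//   5. old gen
// Returning NULL from the last step means the request cannot be satisfied
// and the caller will report an out-of-memory condition.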
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
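// The collection counts are read under the Heap_lock so that the VM
// operation's prologue can detect whether another thread already completed
// an equivalent collection between the read and the safepoint, and skip the
// redundant GC in that case.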
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = total_collections();
    full_gc_count = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count)) {
    return;
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return PSParallelCompact::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  if (young_gen() != NULL) {
    young_gen()->print_on(st);
  }
  if (old_gen() != NULL) {
    old_gen()->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  PSParallelCompact::print_on_error(st);
}
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  // nothing particular
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}