/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       reserved_heap_size,
                       GenAlignment,
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region(heap_rs);

  PSCardTable* card_table = new PSCardTable(heap_rs.region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);
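
  // Note: the card table spans the entire reserved heap region, so a later
  // move of the young/old boundary under UseAdaptiveGCBoundary remains
  // covered by the same table and barrier set.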

  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size is kept as the maximum size of the
  // generation as the boundaries currently stand; it is still used
  // as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  assert(ParallelArguments::is_heterogeneous_heap() || !UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");

  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  // Set up the WorkGang of GC worker threads.
  _workers.initialize_workers();

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold.
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweepProxy::initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection.  That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection.  It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request.  This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions.  It should make every
// attempt to allocate the requested memory.

// Basic allocation policy.  Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously.  When that happens, only one VM operation will succeed,
// and the rest will not be executed.  For that reason, this method loops
// during failed allocation attempts.  If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail-out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock.  VM
    // operations also hold the heap lock during collections.  There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection.  When thread A gets the lock,
    // the collection count has already changed.  To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed.  When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation.
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute?  If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // Retry and/or stall as necessary.
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return a NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever.  If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen.  The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // The death march has ended.
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // The death march has started.
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweepProxy::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy.  Must be called from the VM thread, and
// only at a safepoint!  Note that this method implements policy for
// allocation flow, NOT collection policy.  So we do not check for GC
// time over the limit here; that is the responsibility of the
// heap-specific collection methods.  This method decides where to
// attempt allocations and when to attempt collections, but contains
// no collection-specific policy.
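//
// The fallback sequence implemented below is, in order: scavenge, full
// collection (unless the scavenge already escalated to one), old gen
// allocation, maximally compacting full collection, and a final old gen
// allocation attempt.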
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure: scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure:
  //   mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure:
  //   after mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure: we're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure:
  //   after more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
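// The collection counts are snapshotted under the Heap_lock; the VM operation
// uses them to detect whether another collection completed in the interim, in
// which case its prologue fails and the now-redundant request is skipped.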
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock.
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count)) {
    return;
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweepProxy::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  ParallelScavengeHeap::heap()->workers().print_worker_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs",
                            PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
                            UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds()
                                             : PSMarkSweepProxy::accumulated_time()->seconds());
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
  return (ParallelScavengeHeap*)heap;
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}
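
// Under UseAdaptiveGCBoundary the two resize paths above may first shift the
// boundary between the generations within the shared reserved space; the
// actual resizing of the spaces is then delegated to the generation itself.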

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  // nothing particular
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}