/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  size_t heap_size = _collector_policy->heap_reserved_size_bytes();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  PSCardTable* card_table = new PSCardTable(reserved_region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);
  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that
  // _max_gen_size is kept as the maximum size of the generation as
  // the boundaries currently stand, and is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(_collector_policy->is_hetero_heap() || !UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  // Set up the GCTaskManager.
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}
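
// Manager/pool wiring, summarized from the code above: the old-gen manager
// ("PS MarkSweep") reports against all three pools, since a major collection
// works on the entire heap, while the young-gen manager ("PS Scavenge")
// reports only against the eden and survivor pools.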
namespace {
  class PSIsScavengable : public BoolObjectClosure {
    bool do_object_b(oop obj) {
      return ParallelScavengeHeap::heap()->is_in_young(obj);
    }
  };

  PSIsScavengable _is_scavengable;
}


void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold.
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweepProxy::initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
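//
// In outline (an informal sketch of the code below, not a separate code path):
//
//   mem_allocate(size)           -- basic policy, runs on a mutator thread:
//     loop {
//       { MutexLocker ml(Heap_lock);
//         gc_count = total_collections();
//         try the young gen, then (conditionally) the old gen;
//         return on success; stall or give up if the GCLocker is active; }
//       VM_ParallelGCFailedAllocation(size, gc_count) -> VM thread
//     }
//
//   failed_mem_allocate(size)    -- failed policy, runs on the VM thread:
//     a scavenge, then a full collection, then a maximally compacting full
//     collection, attempting young-gen and old-gen allocation between steps.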
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set
  // it to false here and reset it to true only if the gc time limit is
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }
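
    // Note that the VM operation below is issued only after the MutexLocker
    // scope above has released the Heap_lock. The shared VM_GC_Operation
    // prologue re-acquires the Heap_lock and compares the gc_count captured
    // above against the current total_collections(); if another thread's
    // collection happened in between, the prologue fails and this loop simply
    // retries the allocation (see op.prologue_succeeded() below).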
    if (result == NULL) {
      // Generate a VM operation.
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // Retry and/or stall as necessary.
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses). Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // Death march has ended.
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // Death march has started.
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
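
// Death-march bookkeeping, pulled together from death_march_check() and
// mem_allocate_old_gen() above:
//
//   young-gen allocation succeeds after a full gc -> _death_march_count = 0
//   fails, count == 0, eden-sized request         -> _death_march_count = 1
//   0 < count < 64                                -> allocate from the old
//                                                    gen, ++_death_march_count
//   count reaches 64                              -> reset to 0 and return
//                                                    NULL, forcing a GC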
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweepProxy::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc time over
// limit here; that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}
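
// Note on allocate_new_tlab() above: the requested size is attempted as-is
// from the young gen, and *actual_size is written only on success; min_size
// is not used to retry with a smaller TLAB here.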
// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock.
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweepProxy::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // No need to retire TLABs for verification.
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}
void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
}


void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
  return (ParallelScavengeHeap*)heap;
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}
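
// resize_old_gen() below uses the same guard as resize_young_gen() above:
// if a collection has already moved the young/old boundary (recorded as
// bytes_absorbed_from_eden), the resize is treated as already done; otherwise,
// under UseAdaptiveGCBoundary, the adjoining generations may shift the
// boundary before the generation resizes within its reserved space.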
// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // Nothing particular.
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // Nothing particular.
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  ScavengableNMethods::flush_nmethod(nm);
}

void ParallelScavengeHeap::prune_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}