/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager*              ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  PSCardTable* card_table = new PSCardTable(reserved_region());
  card_table->initialize();
  CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table);
  barrier_set->initialize();
  set_barrier_set(barrier_set);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
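  // Convert the pause-time goals from milliseconds (the units of the
  // MaxGCPauseMillis and MaxGCMinorPauseMillis flags) to the seconds
  // expected by PSAdaptiveSizePolicy below.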
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
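//
// In this file the basic policy is implemented by mem_allocate(), which runs
// on the requesting Java thread, and the failed allocation policy by
// failed_mem_allocate(), which the VM thread runs on that thread's behalf
// via VM_ParallelGCFailedAllocation.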

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it
  // here and reset it to true only if the gc time limit is being exceeded,
  // as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

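      // Give up once this thread has stalled behind the GC locker more than
      // GCLockerRetryAllocationCount times; fail the request rather than
      // retrying indefinitely.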
      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that cannot
      // be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler object
        // so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
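//
// Illustrative walk-through (an editorial addition, not from the original
// comments): after a full gc, a young gen allocation that still returns NULL
// moves _death_march_count from 0 to 1; mem_allocate_old_gen() below then
// satisfies a bounded number of subsequent requests from the old gen (the
// count is capped at 64) before resetting the count and returning NULL to
// force another GC.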
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation policy,
// NOT collection policy. We do not check here whether gc time has exceeded
// its limit; that is the responsibility of the heap-specific collection
// methods. This method decides where to attempt allocations and when to
// attempt collections, but contains no collection-specific policy.
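//
// The fall-back sequence below escalates through five levels: (1) scavenge,
// then allocate in the young gen; (2) full collection, then young gen;
// (3) old gen; (4) maximally compacting full collection, then young gen;
// (5) old gen again.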
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock.
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}
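// Milliseconds since the last GC, as tracked by whichever full-gc
// implementation is in use (parallel compact or serial mark-sweep).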
jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceOldGenTime) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds()
                                   : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
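  // (Editorial guess, not answered in the original source: a heap that has
  // never been collected may still contain unparsable regions that the
  // generation verifiers cannot walk safely.)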
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif