/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager*              ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
                       heap_size, generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
  barrier_set->initialize();
  set_barrier_set(barrier_set);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum size of the
  // generation as the boundaries currently stand, and is still
  // used as that value.
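  // Convert the pause-time goals (the -XX:MaxGCPauseMillis and
  // -XX:MaxGCMinorPauseMillis flags) from milliseconds to the seconds
  // expected by the size policy constructed below.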
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);

  // Set up the GCTaskManager.
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold.
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
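// (The basic policy runs in the requesting thread, while the failed
// allocation policy runs in the VM thread at a safepoint; compare the
// asserts at the top of mem_allocate() and failed_mem_allocate() below.)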
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

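      // Once this thread has stalled on the GC locker more than
      // GCLockerRetryAllocationCount times, give up and report the
      // allocation failure to the caller instead of stalling again.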
      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation.
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // Retry and/or stall as necessary.
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
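// (A march in progress is throttled rather than open-ended:
// mem_allocate_old_gen() below caps the count at 64 consecutive old-gen
// fallback allocations before resetting it and forcing another GC.)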
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // Death march has ended.
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // Death march has started.
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here; that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure:
  // scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure:
  // mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure:
  // after mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure:
  // after more complete mark sweep, allocate in old generation.
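  // If this final attempt also fails, NULL is returned; the caller is
  // then expected to raise an OutOfMemoryError.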
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock.
    gc_count = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

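// Answered by whichever full collector is in use, since each one tracks
// the time of its own last collection.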
jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceOldGenTime) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds()
                                   : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif