/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap*       ParallelScavengeHeap::_psh = NULL;
GCTaskManager*              ParallelScavengeHeap::_gc_task_manager = NULL;

size_t ParallelScavengeHeap::intra_heap_alignment() {
  return GenCollectorPolicy::intra_heap_alignment();
}

size_t ParallelScavengeHeap::alignment() {
  return collector_policy()->min_alignment();
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Initialize collector policy.
  _collector_policy = new GenerationSizer();

  // An alias to save on typing below.
  GenerationSizer* p = _collector_policy;

  const size_t heap_size = p->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, p->max_alignment());
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", p->min_heap_byte_size(),
                       heap_size, alignment(),
                       heap_rs.base(),
                       heap_rs.size());
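
  // If the reservation failed there is no heap to work with, so shut down
  // during initialization and report JNI_ENOMEM to the launcher.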
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum size of the
  // generation as the boundaries currently stand, and is still
  // used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs,
                                   p->initial_gen1_size(),
                                   p->min_gen1_size(),
                                   p->max_gen1_size(),
                                   p->initial_gen0_size(),
                                   p->min_gen0_size(),
                                   p->max_gen0_size(),
                                   alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager.
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold.
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

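// The estimate below excludes one survivor space, since the "to" space is
// kept empty between scavenges and is therefore never available for
// application data.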
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr).
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it
  // so here; it is reset to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  int gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation.
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).
        // Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
                                   "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here; that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
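//
// The attempts below escalate through five levels: (1) scavenge, then
// allocate in the young gen; (2) full collection, then allocate in the
// young gen; (3) allocate in the old gen; (4) maximally compacting full
// collection (which clears SoftReferences), then allocate in the young
// gen; (5) allocate in the old gen once more. A NULL result means every
// level failed.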
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
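// It reads the collection counts under the Heap_lock and then queues a
// VM_ParallelGCSystemGC operation; the counts let the VM operation detect
// whether another collection completed in the meantime.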
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock.
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

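// Iterate over all GC worker threads; the parallel collectors share the
// workers owned by the GCTaskManager.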
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = UseParallelOldGC ?
      PSParallelCompact::accumulated_time()->seconds() :
      PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif