/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s:  " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();

  trace_gen_sizes("ps heap raw",
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The last argument is the minimum number of pages the region must span,
  // which bounds the page size that can be chosen for the heap.
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);

  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  trace_gen_sizes("ps heap rnd",
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t heap_size = og_max_size + yg_max_size;

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base(),
                       heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Make up the generations
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

// Estimate the maximum usable capacity as the reserved size minus the space
// set aside for one survivor space: the to-space is kept empty between
// scavenges and is never available to satisfy allocations.
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  int gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return a NULL ignoring the contents of
        // op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses). Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }
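    // Reaching this point means the VM operation did not execute: its
    // prologue failed because another thread's collection changed the
    // collection count first. Fall through and retry the allocation loop.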
    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here, that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);
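  // PSScavenge::invoke() itself runs a full collection when the scavenge
  // fails or the size policy demands one; it returns true in that case,
  // letting us skip the redundant second-level full gc below.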
  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
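// The collection itself runs in a VM operation at a safepoint; this thread
// only records the current collection counts (under the Heap_lock) and
// submits the request to the VM thread.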
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void
ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif