/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                  ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*                 ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy*      ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap*      ParallelScavengeHeap::_psh = NULL;
GCTaskManager*             ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);

  // Use the same page size for both perm gen and old gen,
  // to allow large pages to be allocated when the heap is reserved
  // for the implementations that can't 'commit' large pages.
  // NEEDS_CLEANUP. ReservedHeapSpace/ReservedSpace that takes both
  // a prefix and a suffix alignment can now be removed.
  const size_t pg_page_sz = og_page_sz;

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  size_t total_reserved = 0;

  total_reserved = add_and_check_overflow(total_reserved, pg_max_size);
  total_reserved = add_and_check_overflow(total_reserved, og_max_size);
  total_reserved = add_and_check_overflow(total_reserved, yg_max_size);

  assert(is_size_aligned(total_reserved, og_align), "Must be");

  char* addr = Universe::preferred_heap_base(total_reserved, og_align, Universe::UnscaledNarrowOop);

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, og_align, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
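        // Final fallback: heap-based narrow oops, in which decoding a
        // compressed oop needs both a base and a shift. The preferred
        // address is expected to be NULL here (asserted below), so the
        // OS is free to place the heap anywhere.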
        addr = Universe::preferred_heap_base(total_reserved, og_align, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
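  // The pause goals arrive via the command-line flags in milliseconds; the
  // adaptive size policy below works in seconds.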
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  // The order of the generations is perm (low addr), old, young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections.
    // There is a lock contention case where thread A blocks waiting on the
    // Heap_lock, while thread B is holding it doing a collection. When thread
    // A gets the lock, the collection count has already changed. To prevent
    // duplicate collections, the policy MUST attempt allocations during the
    // same period it reads the total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses). Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here, that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
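  // Note: judging from how invoked_full_gc is used below, PSScavenge::invoke()
  // is expected to escalate to a full collection on its own when the scavenge
  // is insufficient, returning true in that case so that the second-level
  // mark sweep can be skipped.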
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    invoke_full_gc(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
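        // Stalling while in a JNI critical section would risk deadlock: the
        // GC cannot proceed until every critical section is exited, and this
        // thread would never exit its own while blocked here.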
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation.
        //   gc_overhead_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   a subsequent GC prematurely throws an out-of-memory because
        //   the gc_overhead_limit_exceeded counts did not start
        //   again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
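// (Presumably reached, at a safepoint on the VM thread, via the
// VM_ParallelGCFailedPermanentAllocation operation queued by
// permanent_mem_allocate() above.)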
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

VirtualSpaceSummary ParallelScavengeHeap::create_perm_gen_space_summary() {
  PSVirtualSpace* space = perm_gen()->virtual_space();
  return VirtualSpaceSummary(
    (HeapWord*)space->low_boundary(),
    (HeapWord*)space->high(),
    (HeapWord*)space->high_boundary());
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  const PermGenSummary& perm_gen_summary = create_perm_gen_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary, perm_gen_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif