1 /* 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp" 27 #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp" 28 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" 29 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" 30 #include "gc_implementation/parallelScavenge/generationSizer.hpp" 31 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp" 32 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp" 33 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp" 34 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp" 35 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" 36 #include "gc_implementation/parallelScavenge/psScavenge.hpp" 37 #include "gc_implementation/parallelScavenge/vmPSOperations.hpp" 38 #include "gc_implementation/shared/gcHeapSummary.hpp" 39 #include "gc_implementation/shared/gcWhen.hpp" 40 #include "memory/gcLocker.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "runtime/handles.inline.hpp" 43 #include "runtime/java.hpp" 44 #include "runtime/vmThread.hpp" 45 #include "services/memTracker.hpp" 46 #include "utilities/vmError.hpp" 47 48 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL; 49 PSOldGen* ParallelScavengeHeap::_old_gen = NULL; 50 PSPermGen* ParallelScavengeHeap::_perm_gen = NULL; 51 PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL; 52 PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL; 53 ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL; 54 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; 55 56 static void trace_gen_sizes(const char* const str, 57 size_t pg_min, size_t pg_max, 58 size_t og_min, size_t og_max, 59 size_t yg_min, size_t yg_max) 60 { 61 if (TracePageSizes) { 62 tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " " 63 SIZE_FORMAT "," SIZE_FORMAT " " 64 SIZE_FORMAT "," SIZE_FORMAT " " 65 SIZE_FORMAT, 66 str, pg_min / K, pg_max / K, 67 og_min / K, og_max / K, 68 yg_min / K, yg_max / K, 69 (pg_max + og_max + yg_max) / K); 70 } 71 } 72 73 jint ParallelScavengeHeap::initialize() { 74 CollectedHeap::pre_initialize(); 75 76 // Cannot be initialized until after the flags are parsed 77 // GenerationSizer flag_parser; 78 _collector_policy = new GenerationSizer(); 79 80 size_t yg_min_size = _collector_policy->min_young_gen_size(); 81 size_t yg_max_size = _collector_policy->max_young_gen_size(); 82 size_t og_min_size = _collector_policy->min_old_gen_size(); 83 size_t og_max_size = _collector_policy->max_old_gen_size(); 84 // Why isn't there a min_perm_gen_size()? 85 size_t pg_min_size = _collector_policy->perm_gen_size(); 86 size_t pg_max_size = _collector_policy->max_perm_gen_size(); 87 88 trace_gen_sizes("ps heap raw", 89 pg_min_size, pg_max_size, 90 og_min_size, og_max_size, 91 yg_min_size, yg_max_size); 92 93 // The ReservedSpace ctor used below requires that the page size for the perm 94 // gen is <= the page size for the rest of the heap (young + old gens). 95 const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size, 96 yg_max_size + og_max_size, 97 8); 98 const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size, 99 pg_max_size, 16), 100 og_page_sz); 101 102 const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz); 103 const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz); 104 const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz); 105 106 // Update sizes to reflect the selected page size(s). 107 // 108 // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it 109 // should check UseAdaptiveSizePolicy. Changes from generationSizer could 110 // move to the common code. 111 yg_min_size = align_size_up(yg_min_size, yg_align); 112 yg_max_size = align_size_up(yg_max_size, yg_align); 113 size_t yg_cur_size = 114 align_size_up(_collector_policy->young_gen_size(), yg_align); 115 yg_cur_size = MAX2(yg_cur_size, yg_min_size); 116 117 og_min_size = align_size_up(og_min_size, og_align); 118 // Align old gen size down to preserve specified heap size. 119 assert(og_align == yg_align, "sanity"); 120 og_max_size = align_size_down(og_max_size, og_align); 121 og_max_size = MAX2(og_max_size, og_min_size); 122 size_t og_cur_size = 123 align_size_down(_collector_policy->old_gen_size(), og_align); 124 og_cur_size = MAX2(og_cur_size, og_min_size); 125 126 pg_min_size = align_size_up(pg_min_size, pg_align); 127 pg_max_size = align_size_up(pg_max_size, pg_align); 128 size_t pg_cur_size = pg_min_size; 129 130 trace_gen_sizes("ps heap rnd", 131 pg_min_size, pg_max_size, 132 og_min_size, og_max_size, 133 yg_min_size, yg_max_size); 134 135 size_t total_reserved = 0; 136 137 total_reserved = add_and_check_overflow(total_reserved, pg_max_size); 138 total_reserved = add_and_check_overflow(total_reserved, og_max_size); 139 total_reserved = add_and_check_overflow(total_reserved, yg_max_size); 140 141 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); 142 143 // The main part of the heap (old gen + young gen) can often use a larger page 144 // size than is needed or wanted for the perm gen. Use the "compound 145 // alignment" ReservedSpace ctor to avoid having to use the same page size for 146 // all gens. 147 148 ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size, 149 og_align, addr); 150 151 if (UseCompressedOops) { 152 if (addr != NULL && !heap_rs.is_reserved()) { 153 // Failed to reserve at specified address - the requested memory 154 // region is taken already, for example, by 'java' launcher. 155 // Try again to reserver heap higher. 156 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); 157 ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size, 158 og_align, addr); 159 if (addr != NULL && !heap_rs0.is_reserved()) { 160 // Failed to reserve at specified address again - give up. 161 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); 162 assert(addr == NULL, ""); 163 ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size, 164 og_align, addr); 165 heap_rs = heap_rs1; 166 } else { 167 heap_rs = heap_rs0; 168 } 169 } 170 } 171 172 MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap); 173 174 os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz, 175 heap_rs.base(), pg_max_size); 176 os::trace_page_sizes("ps main", og_min_size + yg_min_size, 177 og_max_size + yg_max_size, og_page_sz, 178 heap_rs.base() + pg_max_size, 179 heap_rs.size() - pg_max_size); 180 if (!heap_rs.is_reserved()) { 181 vm_shutdown_during_initialization( 182 "Could not reserve enough space for object heap"); 183 return JNI_ENOMEM; 184 } 185 186 _reserved = MemRegion((HeapWord*)heap_rs.base(), 187 (HeapWord*)(heap_rs.base() + heap_rs.size())); 188 189 CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3); 190 _barrier_set = barrier_set; 191 oopDesc::set_bs(_barrier_set); 192 if (_barrier_set == NULL) { 193 vm_shutdown_during_initialization( 194 "Could not reserve enough space for barrier set"); 195 return JNI_ENOMEM; 196 } 197 198 // Initial young gen size is 4 Mb 199 // 200 // XXX - what about flag_parser.young_gen_size()? 201 const size_t init_young_size = align_size_up(4 * M, yg_align); 202 yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size); 203 204 // Split the reserved space into perm gen and the main heap (everything else). 205 // The main heap uses a different alignment. 206 ReservedSpace perm_rs = heap_rs.first_part(pg_max_size); 207 ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align); 208 209 // Make up the generations 210 // Calculate the maximum size that a generation can grow. This 211 // includes growth into the other generation. Note that the 212 // parameter _max_gen_size is kept as the maximum 213 // size of the generation as the boundaries currently stand. 214 // _max_gen_size is still used as that value. 215 double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; 216 double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; 217 218 _gens = new AdjoiningGenerations(main_rs, 219 og_cur_size, 220 og_min_size, 221 og_max_size, 222 yg_cur_size, 223 yg_min_size, 224 yg_max_size, 225 yg_align); 226 227 _old_gen = _gens->old_gen(); 228 _young_gen = _gens->young_gen(); 229 230 const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes(); 231 const size_t old_capacity = _old_gen->capacity_in_bytes(); 232 const size_t initial_promo_size = MIN2(eden_capacity, old_capacity); 233 _size_policy = 234 new PSAdaptiveSizePolicy(eden_capacity, 235 initial_promo_size, 236 young_gen()->to_space()->capacity_in_bytes(), 237 intra_heap_alignment(), 238 max_gc_pause_sec, 239 max_gc_minor_pause_sec, 240 GCTimeRatio 241 ); 242 243 _perm_gen = new PSPermGen(perm_rs, 244 pg_align, 245 pg_cur_size, 246 pg_cur_size, 247 pg_max_size, 248 "perm", 2); 249 250 assert(!UseAdaptiveGCBoundary || 251 (old_gen()->virtual_space()->high_boundary() == 252 young_gen()->virtual_space()->low_boundary()), 253 "Boundaries must meet"); 254 // initialize the policy counters - 2 collectors, 3 generations 255 _gc_policy_counters = 256 new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy); 257 _psh = this; 258 259 // Set up the GCTaskManager 260 _gc_task_manager = GCTaskManager::create(ParallelGCThreads); 261 262 if (UseParallelOldGC && !PSParallelCompact::initialize()) { 263 return JNI_ENOMEM; 264 } 265 266 return JNI_OK; 267 } 268 269 void ParallelScavengeHeap::post_initialize() { 270 // Need to init the tenuring threshold 271 PSScavenge::initialize(); 272 if (UseParallelOldGC) { 273 PSParallelCompact::post_initialize(); 274 } else { 275 PSMarkSweep::initialize(); 276 } 277 PSPromotionManager::initialize(); 278 } 279 280 void ParallelScavengeHeap::update_counters() { 281 young_gen()->update_counters(); 282 old_gen()->update_counters(); 283 perm_gen()->update_counters(); 284 } 285 286 size_t ParallelScavengeHeap::capacity() const { 287 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes(); 288 return value; 289 } 290 291 size_t ParallelScavengeHeap::used() const { 292 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes(); 293 return value; 294 } 295 296 bool ParallelScavengeHeap::is_maximal_no_gc() const { 297 return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc(); 298 } 299 300 301 size_t ParallelScavengeHeap::permanent_capacity() const { 302 return perm_gen()->capacity_in_bytes(); 303 } 304 305 size_t ParallelScavengeHeap::permanent_used() const { 306 return perm_gen()->used_in_bytes(); 307 } 308 309 size_t ParallelScavengeHeap::max_capacity() const { 310 size_t estimated = reserved_region().byte_size(); 311 estimated -= perm_gen()->reserved().byte_size(); 312 if (UseAdaptiveSizePolicy) { 313 estimated -= _size_policy->max_survivor_size(young_gen()->max_size()); 314 } else { 315 estimated -= young_gen()->to_space()->capacity_in_bytes(); 316 } 317 return MAX2(estimated, capacity()); 318 } 319 320 bool ParallelScavengeHeap::is_in(const void* p) const { 321 if (young_gen()->is_in(p)) { 322 return true; 323 } 324 325 if (old_gen()->is_in(p)) { 326 return true; 327 } 328 329 if (perm_gen()->is_in(p)) { 330 return true; 331 } 332 333 return false; 334 } 335 336 bool ParallelScavengeHeap::is_in_reserved(const void* p) const { 337 if (young_gen()->is_in_reserved(p)) { 338 return true; 339 } 340 341 if (old_gen()->is_in_reserved(p)) { 342 return true; 343 } 344 345 if (perm_gen()->is_in_reserved(p)) { 346 return true; 347 } 348 349 return false; 350 } 351 352 bool ParallelScavengeHeap::is_scavengable(const void* addr) { 353 return is_in_young((oop)addr); 354 } 355 356 #ifdef ASSERT 357 // Don't implement this by using is_in_young(). This method is used 358 // in some cases to check that is_in_young() is correct. 359 bool ParallelScavengeHeap::is_in_partial_collection(const void *p) { 360 assert(is_in_reserved(p) || p == NULL, 361 "Does not work if address is non-null and outside of the heap"); 362 // The order of the generations is perm (low addr), old, young (high addr) 363 return p >= old_gen()->reserved().end(); 364 } 365 #endif 366 367 // There are two levels of allocation policy here. 368 // 369 // When an allocation request fails, the requesting thread must invoke a VM 370 // operation, transfer control to the VM thread, and await the results of a 371 // garbage collection. That is quite expensive, and we should avoid doing it 372 // multiple times if possible. 373 // 374 // To accomplish this, we have a basic allocation policy, and also a 375 // failed allocation policy. 376 // 377 // The basic allocation policy controls how you allocate memory without 378 // attempting garbage collection. It is okay to grab locks and 379 // expand the heap, if that can be done without coming to a safepoint. 380 // It is likely that the basic allocation policy will not be very 381 // aggressive. 382 // 383 // The failed allocation policy is invoked from the VM thread after 384 // the basic allocation policy is unable to satisfy a mem_allocate 385 // request. This policy needs to cover the entire range of collection, 386 // heap expansion, and out-of-memory conditions. It should make every 387 // attempt to allocate the requested memory. 388 389 // Basic allocation policy. Should never be called at a safepoint, or 390 // from the VM thread. 391 // 392 // This method must handle cases where many mem_allocate requests fail 393 // simultaneously. When that happens, only one VM operation will succeed, 394 // and the rest will not be executed. For that reason, this method loops 395 // during failed allocation attempts. If the java heap becomes exhausted, 396 // we rely on the size_policy object to force a bail out. 397 HeapWord* ParallelScavengeHeap::mem_allocate( 398 size_t size, 399 bool* gc_overhead_limit_was_exceeded) { 400 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint"); 401 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread"); 402 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); 403 404 // In general gc_overhead_limit_was_exceeded should be false so 405 // set it so here and reset it to true only if the gc time 406 // limit is being exceeded as checked below. 407 *gc_overhead_limit_was_exceeded = false; 408 409 HeapWord* result = young_gen()->allocate(size); 410 411 uint loop_count = 0; 412 uint gc_count = 0; 413 414 while (result == NULL) { 415 // We don't want to have multiple collections for a single filled generation. 416 // To prevent this, each thread tracks the total_collections() value, and if 417 // the count has changed, does not do a new collection. 418 // 419 // The collection count must be read only while holding the heap lock. VM 420 // operations also hold the heap lock during collections. There is a lock 421 // contention case where thread A blocks waiting on the Heap_lock, while 422 // thread B is holding it doing a collection. When thread A gets the lock, 423 // the collection count has already changed. To prevent duplicate collections, 424 // The policy MUST attempt allocations during the same period it reads the 425 // total_collections() value! 426 { 427 MutexLocker ml(Heap_lock); 428 gc_count = Universe::heap()->total_collections(); 429 430 result = young_gen()->allocate(size); 431 if (result != NULL) { 432 return result; 433 } 434 435 // If certain conditions hold, try allocating from the old gen. 436 result = mem_allocate_old_gen(size); 437 if (result != NULL) { 438 return result; 439 } 440 441 // Failed to allocate without a gc. 442 if (GC_locker::is_active_and_needs_gc()) { 443 // If this thread is not in a jni critical section, we stall 444 // the requestor until the critical section has cleared and 445 // GC allowed. When the critical section clears, a GC is 446 // initiated by the last thread exiting the critical section; so 447 // we retry the allocation sequence from the beginning of the loop, 448 // rather than causing more, now probably unnecessary, GC attempts. 449 JavaThread* jthr = JavaThread::current(); 450 if (!jthr->in_critical()) { 451 MutexUnlocker mul(Heap_lock); 452 GC_locker::stall_until_clear(); 453 continue; 454 } else { 455 if (CheckJNICalls) { 456 fatal("Possible deadlock due to allocating while" 457 " in jni critical section"); 458 } 459 return NULL; 460 } 461 } 462 } 463 464 if (result == NULL) { 465 // Generate a VM operation 466 VM_ParallelGCFailedAllocation op(size, gc_count); 467 VMThread::execute(&op); 468 469 // Did the VM operation execute? If so, return the result directly. 470 // This prevents us from looping until time out on requests that can 471 // not be satisfied. 472 if (op.prologue_succeeded()) { 473 assert(Universe::heap()->is_in_or_null(op.result()), 474 "result not in heap"); 475 476 // If GC was locked out during VM operation then retry allocation 477 // and/or stall as necessary. 478 if (op.gc_locked()) { 479 assert(op.result() == NULL, "must be NULL if gc_locked() is true"); 480 continue; // retry and/or stall as necessary 481 } 482 483 // Exit the loop if the gc time limit has been exceeded. 484 // The allocation must have failed above ("result" guarding 485 // this path is NULL) and the most recent collection has exceeded the 486 // gc overhead limit (although enough may have been collected to 487 // satisfy the allocation). Exit the loop so that an out-of-memory 488 // will be thrown (return a NULL ignoring the contents of 489 // op.result()), 490 // but clear gc_overhead_limit_exceeded so that the next collection 491 // starts with a clean slate (i.e., forgets about previous overhead 492 // excesses). Fill op.result() with a filler object so that the 493 // heap remains parsable. 494 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); 495 const bool softrefs_clear = collector_policy()->all_soft_refs_clear(); 496 assert(!limit_exceeded || softrefs_clear, "Should have been cleared"); 497 if (limit_exceeded && softrefs_clear) { 498 *gc_overhead_limit_was_exceeded = true; 499 size_policy()->set_gc_overhead_limit_exceeded(false); 500 if (PrintGCDetails && Verbose) { 501 gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: " 502 "return NULL because gc_overhead_limit_exceeded is set"); 503 } 504 if (op.result() != NULL) { 505 CollectedHeap::fill_with_object(op.result(), size); 506 } 507 return NULL; 508 } 509 510 return op.result(); 511 } 512 } 513 514 // The policy object will prevent us from looping forever. If the 515 // time spent in gc crosses a threshold, we will bail out. 516 loop_count++; 517 if ((result == NULL) && (QueuedAllocationWarningCount > 0) && 518 (loop_count % QueuedAllocationWarningCount == 0)) { 519 warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t" 520 " size=%d", loop_count, size); 521 } 522 } 523 524 return result; 525 } 526 527 // A "death march" is a series of ultra-slow allocations in which a full gc is 528 // done before each allocation, and after the full gc the allocation still 529 // cannot be satisfied from the young gen. This routine detects that condition; 530 // it should be called after a full gc has been done and the allocation 531 // attempted from the young gen. The parameter 'addr' should be the result of 532 // that young gen allocation attempt. 533 void 534 ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) { 535 if (addr != NULL) { 536 _death_march_count = 0; // death march has ended 537 } else if (_death_march_count == 0) { 538 if (should_alloc_in_eden(size)) { 539 _death_march_count = 1; // death march has started 540 } 541 } 542 } 543 544 HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) { 545 if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) { 546 // Size is too big for eden, or gc is locked out. 547 return old_gen()->allocate(size); 548 } 549 550 // If a "death march" is in progress, allocate from the old gen a limited 551 // number of times before doing a GC. 552 if (_death_march_count > 0) { 553 if (_death_march_count < 64) { 554 ++_death_march_count; 555 return old_gen()->allocate(size); 556 } else { 557 _death_march_count = 0; 558 } 559 } 560 return NULL; 561 } 562 563 // Failed allocation policy. Must be called from the VM thread, and 564 // only at a safepoint! Note that this method has policy for allocation 565 // flow, and NOT collection policy. So we do not check for gc collection 566 // time over limit here, that is the responsibility of the heap specific 567 // collection methods. This method decides where to attempt allocations, 568 // and when to attempt collections, but no collection specific policy. 569 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) { 570 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 571 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 572 assert(!Universe::heap()->is_gc_active(), "not reentrant"); 573 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); 574 575 // We assume that allocation in eden will fail unless we collect. 576 577 // First level allocation failure, scavenge and allocate in young gen. 578 GCCauseSetter gccs(this, GCCause::_allocation_failure); 579 const bool invoked_full_gc = PSScavenge::invoke(); 580 HeapWord* result = young_gen()->allocate(size); 581 582 // Second level allocation failure. 583 // Mark sweep and allocate in young generation. 584 if (result == NULL && !invoked_full_gc) { 585 invoke_full_gc(false); 586 result = young_gen()->allocate(size); 587 } 588 589 death_march_check(result, size); 590 591 // Third level allocation failure. 592 // After mark sweep and young generation allocation failure, 593 // allocate in old generation. 594 if (result == NULL) { 595 result = old_gen()->allocate(size); 596 } 597 598 // Fourth level allocation failure. We're running out of memory. 599 // More complete mark sweep and allocate in young generation. 600 if (result == NULL) { 601 invoke_full_gc(true); 602 result = young_gen()->allocate(size); 603 } 604 605 // Fifth level allocation failure. 606 // After more complete mark sweep, allocate in old generation. 607 if (result == NULL) { 608 result = old_gen()->allocate(size); 609 } 610 611 return result; 612 } 613 614 // 615 // This is the policy loop for allocating in the permanent generation. 616 // If the initial allocation fails, we create a vm operation which will 617 // cause a collection. 618 HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) { 619 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint"); 620 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread"); 621 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); 622 623 HeapWord* result; 624 625 uint loop_count = 0; 626 uint gc_count = 0; 627 uint full_gc_count = 0; 628 629 do { 630 // We don't want to have multiple collections for a single filled generation. 631 // To prevent this, each thread tracks the total_collections() value, and if 632 // the count has changed, does not do a new collection. 633 // 634 // The collection count must be read only while holding the heap lock. VM 635 // operations also hold the heap lock during collections. There is a lock 636 // contention case where thread A blocks waiting on the Heap_lock, while 637 // thread B is holding it doing a collection. When thread A gets the lock, 638 // the collection count has already changed. To prevent duplicate collections, 639 // The policy MUST attempt allocations during the same period it reads the 640 // total_collections() value! 641 { 642 MutexLocker ml(Heap_lock); 643 gc_count = Universe::heap()->total_collections(); 644 full_gc_count = Universe::heap()->total_full_collections(); 645 646 result = perm_gen()->allocate_permanent(size); 647 648 if (result != NULL) { 649 return result; 650 } 651 652 if (GC_locker::is_active_and_needs_gc()) { 653 // If this thread is not in a jni critical section, we stall 654 // the requestor until the critical section has cleared and 655 // GC allowed. When the critical section clears, a GC is 656 // initiated by the last thread exiting the critical section; so 657 // we retry the allocation sequence from the beginning of the loop, 658 // rather than causing more, now probably unnecessary, GC attempts. 659 JavaThread* jthr = JavaThread::current(); 660 if (!jthr->in_critical()) { 661 MutexUnlocker mul(Heap_lock); 662 GC_locker::stall_until_clear(); 663 continue; 664 } else { 665 if (CheckJNICalls) { 666 fatal("Possible deadlock due to allocating while" 667 " in jni critical section"); 668 } 669 return NULL; 670 } 671 } 672 } 673 674 if (result == NULL) { 675 676 // Exit the loop if the gc time limit has been exceeded. 677 // The allocation must have failed above (result must be NULL), 678 // and the most recent collection must have exceeded the 679 // gc time limit. Exit the loop so that an out-of-memory 680 // will be thrown (returning a NULL will do that), but 681 // clear gc_overhead_limit_exceeded so that the next collection 682 // will succeeded if the applications decides to handle the 683 // out-of-memory and tries to go on. 684 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); 685 if (limit_exceeded) { 686 size_policy()->set_gc_overhead_limit_exceeded(false); 687 if (PrintGCDetails && Verbose) { 688 gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:" 689 " return NULL because gc_overhead_limit_exceeded is set"); 690 } 691 assert(result == NULL, "Allocation did not fail"); 692 return NULL; 693 } 694 695 // Generate a VM operation 696 VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count); 697 VMThread::execute(&op); 698 699 // Did the VM operation execute? If so, return the result directly. 700 // This prevents us from looping until time out on requests that can 701 // not be satisfied. 702 if (op.prologue_succeeded()) { 703 assert(Universe::heap()->is_in_permanent_or_null(op.result()), 704 "result not in heap"); 705 // If GC was locked out during VM operation then retry allocation 706 // and/or stall as necessary. 707 if (op.gc_locked()) { 708 assert(op.result() == NULL, "must be NULL if gc_locked() is true"); 709 continue; // retry and/or stall as necessary 710 } 711 // If a NULL results is being returned, an out-of-memory 712 // will be thrown now. Clear the gc_overhead_limit_exceeded 713 // flag to avoid the following situation. 714 // gc_overhead_limit_exceeded is set during a collection 715 // the collection fails to return enough space and an OOM is thrown 716 // a subsequent GC prematurely throws an out-of-memory because 717 // the gc_overhead_limit_exceeded counts did not start 718 // again from 0. 719 if (op.result() == NULL) { 720 size_policy()->reset_gc_overhead_limit_count(); 721 } 722 return op.result(); 723 } 724 } 725 726 // The policy object will prevent us from looping forever. If the 727 // time spent in gc crosses a threshold, we will bail out. 728 loop_count++; 729 if ((QueuedAllocationWarningCount > 0) && 730 (loop_count % QueuedAllocationWarningCount == 0)) { 731 warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t" 732 " size=%d", loop_count, size); 733 } 734 } while (result == NULL); 735 736 return result; 737 } 738 739 // 740 // This is the policy code for permanent allocations which have failed 741 // and require a collection. Note that just as in failed_mem_allocate, 742 // we do not set collection policy, only where & when to allocate and 743 // collect. 744 HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) { 745 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 746 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 747 assert(!Universe::heap()->is_gc_active(), "not reentrant"); 748 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); 749 assert(size > perm_gen()->free_in_words(), "Allocation should fail"); 750 751 // We assume (and assert!) that an allocation at this point will fail 752 // unless we collect. 753 754 // First level allocation failure. Mark-sweep and allocate in perm gen. 755 GCCauseSetter gccs(this, GCCause::_allocation_failure); 756 invoke_full_gc(false); 757 HeapWord* result = perm_gen()->allocate_permanent(size); 758 759 // Second level allocation failure. We're running out of memory. 760 if (result == NULL) { 761 invoke_full_gc(true); 762 result = perm_gen()->allocate_permanent(size); 763 } 764 765 return result; 766 } 767 768 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) { 769 CollectedHeap::ensure_parsability(retire_tlabs); 770 young_gen()->eden_space()->ensure_parsability(); 771 } 772 773 size_t ParallelScavengeHeap::unsafe_max_alloc() { 774 return young_gen()->eden_space()->free_in_bytes(); 775 } 776 777 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const { 778 return young_gen()->eden_space()->tlab_capacity(thr); 779 } 780 781 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const { 782 return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr); 783 } 784 785 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) { 786 return young_gen()->allocate(size); 787 } 788 789 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() { 790 CollectedHeap::accumulate_statistics_all_tlabs(); 791 } 792 793 void ParallelScavengeHeap::resize_all_tlabs() { 794 CollectedHeap::resize_all_tlabs(); 795 } 796 797 bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) { 798 // We don't need barriers for stores to objects in the 799 // young gen and, a fortiori, for initializing stores to 800 // objects therein. 801 return is_in_young(new_obj); 802 } 803 804 // This method is used by System.gc() and JVMTI. 805 void ParallelScavengeHeap::collect(GCCause::Cause cause) { 806 assert(!Heap_lock->owned_by_self(), 807 "this thread should not own the Heap_lock"); 808 809 unsigned int gc_count = 0; 810 unsigned int full_gc_count = 0; 811 { 812 MutexLocker ml(Heap_lock); 813 // This value is guarded by the Heap_lock 814 gc_count = Universe::heap()->total_collections(); 815 full_gc_count = Universe::heap()->total_full_collections(); 816 } 817 818 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); 819 VMThread::execute(&op); 820 } 821 822 // This interface assumes that it's being called by the 823 // vm thread. It collects the heap assuming that the 824 // heap lock is already held and that we are executing in 825 // the context of the vm thread. 826 void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) { 827 assert(Thread::current()->is_VM_thread(), "Precondition#1"); 828 assert(Heap_lock->is_locked(), "Precondition#2"); 829 GCCauseSetter gcs(this, cause); 830 switch (cause) { 831 case GCCause::_heap_inspection: 832 case GCCause::_heap_dump: { 833 HandleMark hm; 834 invoke_full_gc(false); 835 break; 836 } 837 default: // XXX FIX ME 838 ShouldNotReachHere(); 839 } 840 } 841 842 843 void ParallelScavengeHeap::oop_iterate(OopClosure* cl) { 844 Unimplemented(); 845 } 846 847 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { 848 young_gen()->object_iterate(cl); 849 old_gen()->object_iterate(cl); 850 perm_gen()->object_iterate(cl); 851 } 852 853 void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) { 854 Unimplemented(); 855 } 856 857 void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) { 858 perm_gen()->object_iterate(cl); 859 } 860 861 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { 862 if (young_gen()->is_in_reserved(addr)) { 863 assert(young_gen()->is_in(addr), 864 "addr should be in allocated part of young gen"); 865 // called from os::print_location by find or VMError 866 if (Debugging || VMError::fatal_error_in_progress()) return NULL; 867 Unimplemented(); 868 } else if (old_gen()->is_in_reserved(addr)) { 869 assert(old_gen()->is_in(addr), 870 "addr should be in allocated part of old gen"); 871 return old_gen()->start_array()->object_start((HeapWord*)addr); 872 } else if (perm_gen()->is_in_reserved(addr)) { 873 assert(perm_gen()->is_in(addr), 874 "addr should be in allocated part of perm gen"); 875 return perm_gen()->start_array()->object_start((HeapWord*)addr); 876 } 877 return 0; 878 } 879 880 size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const { 881 return oop(addr)->size(); 882 } 883 884 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const { 885 return block_start(addr) == addr; 886 } 887 888 jlong ParallelScavengeHeap::millis_since_last_gc() { 889 return UseParallelOldGC ? 890 PSParallelCompact::millis_since_last_gc() : 891 PSMarkSweep::millis_since_last_gc(); 892 } 893 894 void ParallelScavengeHeap::prepare_for_verify() { 895 ensure_parsability(false); // no need to retire TLABs for verification 896 } 897 898 PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() { 899 PSOldGen* old = old_gen(); 900 HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr(); 901 VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end()); 902 SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes()); 903 904 PSYoungGen* young = young_gen(); 905 VirtualSpaceSummary young_summary(young->reserved().start(), 906 (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end()); 907 908 MutableSpace* eden = young_gen()->eden_space(); 909 SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes()); 910 911 MutableSpace* from = young_gen()->from_space(); 912 SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes()); 913 914 MutableSpace* to = young_gen()->to_space(); 915 SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes()); 916 917 VirtualSpaceSummary heap_summary = create_heap_space_summary(); 918 return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space); 919 } 920 921 VirtualSpaceSummary ParallelScavengeHeap::create_perm_gen_space_summary() { 922 PSVirtualSpace* space = perm_gen()->virtual_space(); 923 return VirtualSpaceSummary( 924 (HeapWord*)space->low_boundary(), 925 (HeapWord*)space->high(), 926 (HeapWord*)space->high_boundary()); 927 } 928 929 void ParallelScavengeHeap::print_on(outputStream* st) const { 930 young_gen()->print_on(st); 931 old_gen()->print_on(st); 932 perm_gen()->print_on(st); 933 } 934 935 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const { 936 PSScavenge::gc_task_manager()->threads_do(tc); 937 } 938 939 void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const { 940 PSScavenge::gc_task_manager()->print_threads_on(st); 941 } 942 943 void ParallelScavengeHeap::print_tracing_info() const { 944 if (TraceGen0Time) { 945 double time = PSScavenge::accumulated_time()->seconds(); 946 tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time); 947 } 948 if (TraceGen1Time) { 949 double time = PSMarkSweep::accumulated_time()->seconds(); 950 tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time); 951 } 952 } 953 954 955 void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) { 956 // Why do we need the total_collections()-filter below? 957 if (total_collections() > 0) { 958 if (!silent) { 959 gclog_or_tty->print("permanent "); 960 } 961 perm_gen()->verify(); 962 963 if (!silent) { 964 gclog_or_tty->print("tenured "); 965 } 966 old_gen()->verify(); 967 968 if (!silent) { 969 gclog_or_tty->print("eden "); 970 } 971 young_gen()->verify(); 972 } 973 } 974 975 void ParallelScavengeHeap::print_heap_change(size_t prev_used) { 976 if (PrintGCDetails && Verbose) { 977 gclog_or_tty->print(" " SIZE_FORMAT 978 "->" SIZE_FORMAT 979 "(" SIZE_FORMAT ")", 980 prev_used, used(), capacity()); 981 } else { 982 gclog_or_tty->print(" " SIZE_FORMAT "K" 983 "->" SIZE_FORMAT "K" 984 "(" SIZE_FORMAT "K)", 985 prev_used / K, used() / K, capacity() / K); 986 } 987 } 988 989 void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) { 990 const PSHeapSummary& heap_summary = create_ps_heap_summary(); 991 const PermGenSummary& perm_gen_summary = create_perm_gen_summary(); 992 gc_tracer->report_gc_heap_summary(when, heap_summary, perm_gen_summary); 993 } 994 995 ParallelScavengeHeap* ParallelScavengeHeap::heap() { 996 assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()"); 997 assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap"); 998 return _psh; 999 } 1000 1001 // Before delegating the resize to the young generation, 1002 // the reserved space for the young and old generations 1003 // may be changed to accomodate the desired resize. 1004 void ParallelScavengeHeap::resize_young_gen(size_t eden_size, 1005 size_t survivor_size) { 1006 if (UseAdaptiveGCBoundary) { 1007 if (size_policy()->bytes_absorbed_from_eden() != 0) { 1008 size_policy()->reset_bytes_absorbed_from_eden(); 1009 return; // The generation changed size already. 1010 } 1011 gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size); 1012 } 1013 1014 // Delegate the resize to the generation. 1015 _young_gen->resize(eden_size, survivor_size); 1016 } 1017 1018 // Before delegating the resize to the old generation, 1019 // the reserved space for the young and old generations 1020 // may be changed to accomodate the desired resize. 1021 void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) { 1022 if (UseAdaptiveGCBoundary) { 1023 if (size_policy()->bytes_absorbed_from_eden() != 0) { 1024 size_policy()->reset_bytes_absorbed_from_eden(); 1025 return; // The generation changed size already. 1026 } 1027 gens()->adjust_boundary_for_old_gen_needs(desired_free_space); 1028 } 1029 1030 // Delegate the resize to the generation. 1031 _old_gen->resize(desired_free_space); 1032 } 1033 1034 ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() { 1035 // nothing particular 1036 } 1037 1038 ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() { 1039 // nothing particular 1040 } 1041 1042 #ifndef PRODUCT 1043 void ParallelScavengeHeap::record_gen_tops_before_GC() { 1044 if (ZapUnusedHeapArea) { 1045 young_gen()->record_spaces_top(); 1046 old_gen()->record_spaces_top(); 1047 perm_gen()->record_spaces_top(); 1048 } 1049 } 1050 1051 void ParallelScavengeHeap::gen_mangle_unused_area() { 1052 if (ZapUnusedHeapArea) { 1053 young_gen()->eden_space()->mangle_unused_area(); 1054 young_gen()->to_space()->mangle_unused_area(); 1055 young_gen()->from_space()->mangle_unused_area(); 1056 old_gen()->object_space()->mangle_unused_area(); 1057 perm_gen()->object_space()->mangle_unused_area(); 1058 } 1059 } 1060 #endif