/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*                  ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap*       ParallelScavengeHeap::_psh = NULL;
GCTaskManager*              ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
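  // Note: the min/max limits above come from GenerationSizer, which derives
  // them during argument parsing from the command-line flags (e.g. -Xms/-Xmx
  // and NewSize/MaxNewSize).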
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size =
    align_size_up(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger
  // page size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size
  // for all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
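        // Last resort: heap-based narrow oops.  For HeapBasedNarrowOop,
        // preferred_heap_base() is expected to return NULL (any address will
        // do, since oops are then decoded by adding the heap base); the
        // assert below relies on that.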
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that _max_gen_size
  // is still kept, and used, as the maximum size of the generation as
  // the boundaries currently stand.
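  // The pause goals are converted from milliseconds (the flag units) to the
  // seconds expected by PSAdaptiveSizePolicy.  AdjoiningGenerations places
  // the old gen below the young gen within main_rs; with UseAdaptiveGCBoundary
  // the boundary between them can move at run time (see the "Boundaries must
  // meet" assert below).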
  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis) / 1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");

  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection.  That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection.  It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request.  This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions.  It should make every
// attempt to allocate the requested memory.

// Basic allocation policy.  Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously.  When that happens, only one VM operation will succeed,
// and the rest will not be executed.  For that reason, this method loops
// during failed allocation attempts.  If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock.  VM
    // operations also hold the heap lock during collections.  There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection.  When thread A gets the lock,
    // the collection count has already changed.  To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
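    // For example: threads A and B both fail to allocate and both read
    // gc_count == N below.  Whichever VM operation runs first does the
    // collection and bumps total_collections() to N + 1; the other
    // operation's prologue sees the stale count and backs out without
    // collecting again, and its requester retries the allocation instead.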
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) if GC is locked out via the GCLocker, the young gen is full and
      //     the need for a GC has already been signalled to the GCLocker
      //     (done at a safepoint),
      // then, rather than force a safepoint and a (potentially futile)
      // collection attempt for each allocation, try allocating directly
      // in old_gen.  For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out.  If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed.  When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute?  If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
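        // The assert below encodes the expected invariant: the size policy
        // declares the overhead limit exceeded only after soft references
        // have been cleared.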
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
                                   "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever.  If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy.  Must be called from the VM thread, and
// only at a safepoint!  Note that this method has policy for allocation
// flow, and NOT collection policy.  So we do not check for gc time over
// limit here; that is the responsibility of the heap-specific collection
// methods.  This method decides where to attempt allocations and when to
// attempt collections, but has no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure: scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure.  We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
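  // Note that TLAB requests are never satisfied from the old gen (see the
  // !is_tlab guards above and below): TLABs are carved out of the young
  // gen only.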
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock.  VM
    // operations also hold the heap lock during collections.  There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection.  When thread A gets the lock,
    // the collection count has already changed.  To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed.  When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory condition and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
                                 " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute?  If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //   gc_overhead_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   a subsequent GC prematurely throws an out-of-memory because
        //   the gc_overhead_limit_exceeded counts did not start
        //   again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever.  If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection.  Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure.  We're running out of memory.
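  // As in failed_mem_allocate(), the 'true' argument below requests a more
  // complete, maximally compacting collection before giving up.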
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread.  It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
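// Only causes that this collector knows how to service here (currently
// heap inspection and heap dump) are accepted; any other cause falls
// through to ShouldNotReachHere() in the switch below.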
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}

void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    if (Debugging)  return NULL;  // called from find() in debug.cpp
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif