#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)parallelScavengeHeap.cpp 1.95 07/10/04 10:49:31 JVM"
#endif
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parallelScavengeHeap.cpp.incl"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*   ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  size_t yg_min_size = flag_parser.min_young_gen_size();
  size_t yg_max_size = flag_parser.max_young_gen_size();
  size_t og_min_size = flag_parser.min_old_gen_size();
  size_t og_max_size = flag_parser.max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = flag_parser.perm_gen_size();
  size_t pg_max_size = flag_parser.max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
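
  // Note (illustrative, not a functional change): align_size_up(sz, align)
  // rounds sz up to a multiple of align, so with a 4M page-derived alignment
  // a 5M minimum young gen size, for example, becomes 8M in the code below.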
  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.
  ReservedSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                        og_align);
  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
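  // The pause-time goals are specified in milliseconds (MaxGCPauseMillis,
  // MaxGCMinorPauseMillis) but PSAdaptiveSizePolicy takes them in seconds,
  // so convert here.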
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_generation_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

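// post_initialize() runs after initialize(), once the heap object itself
// exists; it finishes the setup that needs a fully constructed heap: the
// tenuring threshold, the chosen full-gc implementation, and the promotion
// managers.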
void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
    if (VerifyParallelOldWithMarkSweep) {
      // Will be used for verification of par old.
      PSMarkSweep::initialize();
    }
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_young(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSYoungGen* young_gen = heap->young_gen();

  if (young_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  if (old_gen->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

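// Note that is_in_young() and is_in_old_or_perm() test against the
// generations' reserved regions rather than their currently committed or
// allocated parts, so they classify any address by the generation that
// could ever contain it.
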
// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words() / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //     flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

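// For illustration only: a hypothetical caller of mem_allocate() follows
// roughly this protocol, treating a NULL result as grounds for throwing
// an out-of-memory error.
//
//   bool gc_overhead_limit_was_exceeded = false;
//   HeapWord* obj = heap->mem_allocate(size,
//                                      false /* is_noref */,
//                                      false /* is_tlab */,
//                                      &gc_overhead_limit_was_exceeded);
//   if (obj == NULL) {
//     // throw OutOfMemoryError, noting gc_overhead_limit_was_exceeded
//   }
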
// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc collection
// time over limit here, that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //     flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

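  // Unlike failed_mem_allocate(), there is no scavenge step here: a
  // scavenge collects only the young generation, so the first recourse
  // for a failed permanent allocation is a full collection.
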
  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
  CollectedHeap::fill_all_tlabs(retire);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

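// Note: the counts captured in collect() above let the VM operation detect
// that another thread's collection completed between the release of the
// Heap_lock and the execution of the operation, so a redundant collection
// can be skipped.
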
// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}