/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  assert(max_alignment() >= min_alignment(),
      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));
  assert(max_alignment() % min_alignment() == 0,
      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));

  if (PermSize > MaxPermSize) {
    MaxPermSize = PermSize;
  }
  PermSize = MAX2(min_alignment(), align_size_down_(PermSize, min_alignment()));
  // Don't increase Perm size limit above specified.
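  // Worked example (hypothetical values, assuming max_alignment() == 4M):
  // -XX:MaxPermSize=70m (73400320 bytes) is rounded down below to
  // 17 * 4M = 71303168 bytes (68M), so the limit never exceeds what the
  // user specified.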
  MaxPermSize = align_size_down(MaxPermSize, max_alignment());
  if (PermSize > MaxPermSize) {
    PermSize = MaxPermSize;
  }

  MinPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MinPermHeapExpansion, min_alignment()));
  MaxPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MaxPermHeapExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment());
  SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment());
  SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment());

  assert(PermSize % min_alignment() == 0, "permanent space alignment");
  assert(MaxPermSize % max_alignment() == 0, "maximum permanent space alignment");
  assert(SharedReadOnlySize % max_alignment() == 0, "read-only space alignment");
  assert(SharedReadWriteSize % max_alignment() == 0, "read-write space alignment");
  assert(SharedMiscDataSize % max_alignment() == 0, "misc-data space alignment");
  if (PermSize < M) {
    vm_exit_during_initialization("Too small initial permanent heap");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms are aligned.
  set_initial_heap_byte_size(InitialHeapSize);
  if (initial_heap_byte_size() == 0) {
    set_initial_heap_byte_size(NewSize + OldSize);
  }
  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
                                           min_alignment()));

  set_min_heap_byte_size(Arguments::min_heap_size());
  if (min_heap_byte_size() == 0) {
    set_min_heap_byte_size(NewSize + OldSize);
  }
  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
                                       min_alignment()));

  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in the old generation.
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
  _permanent_generation =
    new PermanentGenerationSpec(pgnm, PermSize, MaxPermSize,
                                SharedReadOnlySize,
                                SharedReadWriteSize,
                                SharedMiscDataSize,
                                SharedMiscCodeSize);
  if (_permanent_generation == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
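  // Read-and-clear: the stored preference is consumed exactly once, so a
  // request to clear SoftReferences applies to a single collection only.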
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
    case GenRemSet::CardTable: {
      CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
      return res;
    }
    default:
      guarantee(false, "unrecognized GenRemSet::Name");
      return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs.  SoftRefs may
  // have been cleared in the last collection, but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}


// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                        align_size_down(x, min_alignment()) :
                        min_alignment();
  return new_gen_size;
}

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_minor_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well.  Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
      // In presence of large pages we have to make sure that our
      // alignment is large page aware.
      alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.
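  // Worked example (hypothetical values, assuming min_alignment() == 64K):
  // -XX:NewSize=100000000 is not 64K-aligned, so it is rounded down to
  // 1525 * 64K = 99942400 bytes; if MaxNewSize was smaller than NewSize,
  // it is first raised to NewSize and then aligned the same way.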

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces.
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choice of parameters is done before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generations.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k).  A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size.  For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap.  The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap.  This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape.  It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices.  The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.
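    // Worked example (hypothetical values): with min = 20M, initial = 10M
    // and max = 30M, the clamps below first leave min at MIN2(20M, 30M) = 20M,
    // then raise initial to MAX2(MIN2(10M, 30M), 20M) = 20M, and finally
    // leave min at MIN2(20M, 20M) = 20M, establishing min <= initial <= max.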

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen0_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
       (heap_size - min_gen0_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize.
      *gen0_size_ptr = heap_size - min_gen0_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}

// Minimum sizes of the generations may be different from
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flag exists
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
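    // For example (hypothetical values): -Xms64m with a 16M minimum gen0
    // yields a 48M minimum gen1 below; the align_size_down()/MAX2() pair
    // then keeps the result min_alignment()-aligned and non-zero.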
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
          "generation sizes: using minimum heap = " SIZE_FORMAT,
          min_heap_byte_size());
    }
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
          "generation sizes: using maximum heap = " SIZE_FORMAT
          " -XX:OldSize flag is being ignored",
          max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
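  // Each iteration tries, in order: a lock-free fast-path allocation in
  // gen0, a slow-path allocation under the Heap_lock (optionally trying
  // older generations), and finally a VM_GenCollectForAllocation safepoint
  // operation; a pending JNI critical section makes the thread stall and
  // retry instead of triggering another GC.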
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.
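      // The limit only triggers an out-of-memory if SoftReferences have
      // already been cleared; any object the VM op did hand back is turned
      // into a dummy filler below so the heap stays parsable.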

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  initialize_perm_generation(PermGen::MarkSweepCompact);
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC && ParallelGCThreads > 0) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC && ParallelGCThreads > 0) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  }
  else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}
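// Note on sequencing (a summary, assuming the usual CollectorPolicy
// contract): initialize_all() is expected to run initialize_flags(),
// then initialize_size_info(), then initialize_generations(), so the
// generation specs built above see fully reconciled flag and size values.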