/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  assert(max_alignment() >= min_alignment(),
         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
                 max_alignment(), min_alignment()));
  assert(max_alignment() % min_alignment() == 0,
         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
                 max_alignment(), min_alignment()));

  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (MetaspaceSize > MaxMetaspaceSize) {
    MaxMetaspaceSize = MetaspaceSize;
  }
  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
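  // Illustration of the clamping above (assumed values, not taken from
  // this file): with min_alignment() == 64K, -XX:MetaspaceSize=100000
  // is aligned down to 65536 (64K); a value below 64K would be raised
  // to the 64K floor by the MAX2().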
  // Don't increase the Metaspace size limit above what was specified.
  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms must be aligned.
  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties.
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in the old space.
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
    case GenRemSet::CardTable: {
      CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
      return res;
    }
    default:
      guarantee(false, "unrecognized GenRemSet::Name");
      return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs.  SoftRefs may
  // have been cleared in the last collection, but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}


// GenCollectorPolicy methods.

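// Worked example for the NewRatio scaling below (illustrative numbers):
// -XX:NewRatio=2 requests an old generation twice the size of the young
// generation, so a base size of 96M yields x = 96M / (2 + 1) = 32M for
// the young generation, rounded down to min_alignment() (or raised to
// min_alignment() if the quotient falls below it).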
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio + 1);
  size_t new_gen_size = x > min_alignment() ?
                        align_size_down(x, min_alignment()) :
                        min_alignment();
  return new_gen_size;
}

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well.  Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // In the presence of large pages we have to make sure that our
    // alignment is large page aware.
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters.
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags.
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3 * min_alignment()) {
    // Make sure there is room for eden and two survivor spaces.
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

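// Sizing example for the OldSize-driven path below (assumed flag values):
// with -XX:OldSize=512m and -XX:NewRatio=2, the heap that preserves the
// requested old/young proportion is (512M / 2) * (2 + 1) = 768M, so both
// MaxHeapSize and InitialHeapSize become 768M (aligned up to
// max_alignment()).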
void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size, so we
    // use it here to calculate how big the heap should be based on the
    // requested OldSize and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  // Adjust the maximum heap size if necessary.
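  // Shrink example (illustrative flag values): -XX:NewSize=512m
  // -XX:OldSize=512m -XX:MaxHeapSize=768m gives a shrink_factor of
  // 768M / 1024M = 0.75, so NewSize is scaled down to 384M and OldSize
  // becomes MaxHeapSize - NewSize = 384M.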
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // Somebody has set a maximum heap size with the intention that we
      // should not exceed it.  Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // Align the shrunken NewSize.
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // max_alignment(), and we just made sure that NewSize is aligned to
      // min_alignment().  In initialize_flags() we verified that max_alignment()
      // is a multiple of min_alignment().
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // We need to do this again after the adjustments above.
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags.
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set parameters.
// Ergonomic choices of parameters are made before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generations.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generations sizes.

  // Determine the maximum size of gen0.

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    } else if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k).  A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size.  For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap.  The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap.  This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape.  It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices.  The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same, so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.
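    // Illustration of the clamping below (assumed sizes): if the bounds
    // above left min = 64M, initial = 96M and max = 80M, the final check
    // lowers initial to 80M and leaves min at 64M, restoring
    // min <= initial <= max.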
    // Final check: min <= initial <= max.
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
      SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + min_alignment())) {
      // Adjust gen0 down to accommodate min_gen1_size.
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}

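// Example of the adjustment above (assumed values): with heap_size = 96M,
// *gen0_size_ptr = 64M and min_gen1_size = 48M, the two generations (112M)
// exceed the heap; since 96M still fits min_gen1_size plus an aligned gen0,
// gen0 is cut to 96M - 48M = 48M and the method returns true.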

// Minimum sizes of the generations may be different than
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flag exists
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // OldSize has been explicitly set on the command line.  Use it
    // and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Adjust the initial size likewise.
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1.
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
      SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
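  // Each iteration tries, in order: a lock-free allocation in gen0, a
  // locked allocation across the generations, heap expansion or a stall
  // if the GC locker is held, and finally a collection at a safepoint
  // via VM_GenCollectForAllocation.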
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // Discard any handles allocated in each iteration.

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed.  When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited.
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
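    // The operation's prologue compares gc_count_before with the current
    // collection count; if another thread's GC has already run since the
    // Heap_lock was released, the operation is skipped and
    // prologue_succeeded() returns false, so this loop retries.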
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

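// Allocation has failed even after a GC, so escalate in stages: expand the
// heap if a JNI critical section is blocking collection, otherwise run an
// incremental (or, if that would fail, a full) collection, retry the
// allocation, try expansion, and as a last resort collect again while
// clearing all soft references and fully compacting the heap.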
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;  // Could be null if we are out of space.
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory.  Try every trick
  // we can to reclaim memory.  Force collection of soft references.  Force
  // a complete compaction of the heap.  Any additional methods for finding
  // free memory should be here, especially if they are expensive.  If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted.

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

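// Metadata allocation follows the same retry shape as mem_allocate_work():
// expand-and-allocate when the GC locker blocks collection (stalling
// threads not themselves in a critical section), otherwise request a
// collection at a safepoint and loop until the allocation succeeds or a
// GC has actually been done.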
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited.
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection, and a full collection
        // is (currently) needed for unloading classes, so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self-consistent gc_count's.
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation.
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.  Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done.
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // Initialize the policy counters - 2 collectors, 3 generations.
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}