/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (MetaspaceSize > MaxMetaspaceSize) {
    MaxMetaspaceSize = MetaspaceSize;
  }
  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
  // Don't increase Metaspace size limit above specified.
  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms are aligned
  set_initial_heap_byte_size(InitialHeapSize);
  if (initial_heap_byte_size() == 0) {
    set_initial_heap_byte_size(NewSize + OldSize);
  }
  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
                                           min_alignment()));

  set_min_heap_byte_size(Arguments::min_heap_size());
  if (min_heap_byte_size() == 0) {
    set_min_heap_byte_size(NewSize + OldSize);
  }
  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
                                       min_alignment()));

  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  // Check heap parameter properties
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}


// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                        align_size_down(x, min_alignment()) :
                        min_alignment();
  return new_gen_size;
}

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes of heap correspond to a
  // 1 byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation. The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // In the presence of large pages we have to make sure that our
    // alignment is large page aware.
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());
  assert(max_alignment() >= min_alignment() &&
         max_alignment() % min_alignment() == 0,
         "invalid alignment constraints");

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.
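
  // For example (illustrative values): running with -XX:NewSize=192m while
  // MaxNewSize is smaller causes MaxNewSize to be raised to 192m by the
  // adjustment below; both values are then aligned down to min_alignment().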

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  // Adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // Somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // Align
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // max_alignment(), and we just made sure that NewSize is aligned to
      // min_alignment(). In initialize_flags() we verified that max_alignment()
      // is a multiple of min_alignment().
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // Need to do this again
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically set parameters.
// The ergonomic choice of parameters is made before this method is called.
// Values for command line parameters such as NewSize and MaxNewSize feed
// those ergonomic choices into this method.
// This method makes the final generation sizings consistent with themselves
// and with the overall heap sizings.
// In the absence of explicitly set command line flags, policies such as the
// use of NewRatio are used to size the generations.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.
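
  // For example (illustrative numbers): with the default NewRatio of 2 and a
  // 768M maximum heap, scale_by_NewRatio_aligned() yields
  // 768M / (NewRatio + 1) = 256M as the maximum gen0 size, aligned down to
  // min_alignment(), when MaxNewSize has not been set explicitly or
  // ergonomically.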

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k). A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size. For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap. The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap. This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape. It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices. The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics comes into play here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it. If it is used, also use it
      // to set the initial size. Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values. If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
      SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of gen1 to make adjustments to gen0
// because of the gen1 sizing policy. gen0 initially has the most freedom in
// sizing because it is sized before the policy for gen1 is applied. Once the
// gen1 policies have been applied, there may be conflicts in the shape of the
// heap and this method is used to make the needed adjustments. The
// application of the policies could be more sophisticated (iterative for
// example) but keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + min_alignment())) {
      // Adjust gen0 down to accommodate min_gen1_size
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}

// Minimum sizes of the generations may be different from the initial sizes.
// An inconsistency is permitted here in the total size that can be specified
// explicitly by command line specification of OldSize and NewSize and also a
// command line specification of -Xms. Issue a warning but allow the values
// to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size). In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // OldSize has been explicitly set on the command line. Use it
    // and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              min_heap_byte_size());
    }
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
      SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // error will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead limit state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.
    // Check gc_locked() before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // Initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}