/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

// Align down. If the aligning results in 0, return 'alignment'.
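// (Illustrative example: with a 64K alignment, a 100K size aligns down to
// 64K, while a 10K size would align down to 0 and is therefore bumped back
// up to the 64K alignment value.)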
static size_t restricted_align_down(size_t size, size_t alignment) {
  return MAX2(alignment, align_size_down_(size, alignment));
}

void CollectorPolicy::initialize_flags() {
  assert(max_alignment() >= min_alignment(),
      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));
  assert(max_alignment() % min_alignment() == 0,
      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));

  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
    MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());
  }

  if (MetaspaceSize > MaxMetaspaceSize) {
    FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
  }

  if (!is_size_aligned(MetaspaceSize, min_alignment())) {
    FLAG_SET_ERGO(uintx, MetaspaceSize,
        restricted_align_down(MetaspaceSize, min_alignment()));
  }

  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");

  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms must be aligned
  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs.  SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

size_t CollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.

  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
  // is supported.
  // Requirements of any new remembered set implementations must be added here.
  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // in presence of large pages we have to make sure that our
    // alignment is large page aware
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                        align_size_down(x, min_alignment()) :
                        min_alignment();
  return new_gen_size;
}

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.
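  // (For example, if NewSize has been set larger than MaxNewSize, the code
  // below raises MaxNewSize to match rather than rejecting the combination.)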

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  // adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // align
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // max_alignment(), and we just made sure that NewSize is aligned to
      // min_alignment(). In initialize_flags() we verified that max_alignment()
      // is a multiple of min_alignment().
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // need to do this again
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
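  // At this point NewSize + OldSize no longer exceeds MaxHeapSize, and
  // MaxHeapSize is aligned to max_alignment().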

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choice of parameters are done before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k).  A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size.  For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap.  The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap.  This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape.  It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices.  The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.
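  // The simple case below is a fixed-size heap (-Xms equal to -Xmx): all
  // three gen0 sizes collapse to max_new_size. Otherwise the minimum and
  // initial sizes are derived from NewSize or NewRatio and then bounded by
  // the corresponding overall heap sizes.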

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
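// Returns true only when gen0 had to be shrunk to make room for
// min_gen1_size; if no adjustment is needed, or if gen1 is reduced to fit
// instead, false is returned.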
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + min_alignment())) {
      // Adjust gen0 down to accommodate min_gen1_size
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}

// Minimum sizes of the generations may be different from
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flag exists
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
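    // (For example, an -Xms that exceeds the explicit OldSize plus the gen0
    // minimum by more than one alignment unit trips the warning below; the
    // values are still accepted.)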
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
         min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
          "generation sizes: using minimum heap = " SIZE_FORMAT,
          min_heap_byte_size());
    }
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
          "generation sizes: using maximum heap = " SIZE_FORMAT
          " -XX:OldSize flag is being ignored",
          max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
"(TLAB)" : ""); 695 } 696 } 697 } 698 699 HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size, 700 bool is_tlab) { 701 GenCollectedHeap *gch = GenCollectedHeap::heap(); 702 HeapWord* result = NULL; 703 for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) { 704 Generation *gen = gch->get_gen(i); 705 if (gen->should_allocate(size, is_tlab)) { 706 result = gen->expand_and_allocate(size, is_tlab); 707 } 708 } 709 assert(result == NULL || gch->is_in_reserved(result), "result not in heap"); 710 return result; 711 } 712 713 HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size, 714 bool is_tlab) { 715 GenCollectedHeap *gch = GenCollectedHeap::heap(); 716 GCCauseSetter x(gch, GCCause::_allocation_failure); 717 HeapWord* result = NULL; 718 719 assert(size != 0, "Precondition violated"); 720 if (GC_locker::is_active_and_needs_gc()) { 721 // GC locker is active; instead of a collection we will attempt 722 // to expand the heap, if there's room for expansion. 723 if (!gch->is_maximal_no_gc()) { 724 result = expand_heap_and_allocate(size, is_tlab); 725 } 726 return result; // could be null if we are out of space 727 } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) { 728 // Do an incremental collection. 729 gch->do_collection(false /* full */, 730 false /* clear_all_soft_refs */, 731 size /* size */, 732 is_tlab /* is_tlab */, 733 number_of_generations() - 1 /* max_level */); 734 } else { 735 if (Verbose && PrintGCDetails) { 736 gclog_or_tty->print(" :: Trying full because partial may fail :: "); 737 } 738 // Try a full collection; see delta for bug id 6266275 739 // for the original code and why this has been simplified 740 // with from-space allocation criteria modified and 741 // such allocation moved out of the safepoint path. 742 gch->do_collection(true /* full */, 743 false /* clear_all_soft_refs */, 744 size /* size */, 745 is_tlab /* is_tlab */, 746 number_of_generations() - 1 /* max_level */); 747 } 748 749 result = gch->attempt_allocation(size, is_tlab, false /*first_only*/); 750 751 if (result != NULL) { 752 assert(gch->is_in_reserved(result), "result not in heap"); 753 return result; 754 } 755 756 // OK, collection failed, try expansion. 757 result = expand_heap_and_allocate(size, is_tlab); 758 if (result != NULL) { 759 return result; 760 } 761 762 // If we reach this point, we're really out of memory. Try every trick 763 // we can to reclaim memory. Force collection of soft references. Force 764 // a complete compaction of the heap. Any additional methods for finding 765 // free memory should be here, especially if they are expensive. If this 766 // attempt fails, an OOM exception will be thrown. 767 { 768 UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted 769 770 gch->do_collection(true /* full */, 771 true /* clear_all_soft_refs */, 772 size /* size */, 773 is_tlab /* is_tlab */, 774 number_of_generations() - 1 /* max_level */); 775 } 776 777 result = gch->attempt_allocation(size, is_tlab, false /* first_only */); 778 if (result != NULL) { 779 assert(gch->is_in_reserved(result), "result not in heap"); 780 return result; 781 } 782 783 assert(!should_clear_all_soft_refs(), 784 "Flag should have been handled and cleared prior to this point"); 785 786 // What else? We might try synchronous finalization later. 
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.  Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}