1 /* 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/shared/adaptiveSizePolicy.hpp" 27 #include "gc_implementation/shared/gcPolicyCounters.hpp" 28 #include "gc_implementation/shared/vmGCOperations.hpp" 29 #include "memory/cardTableRS.hpp" 30 #include "memory/collectorPolicy.hpp" 31 #include "memory/gcLocker.inline.hpp" 32 #include "memory/genCollectedHeap.hpp" 33 #include "memory/generationSpec.hpp" 34 #include "memory/space.hpp" 35 #include "memory/universe.hpp" 36 #include "runtime/arguments.hpp" 37 #include "runtime/globals_extension.hpp" 38 #include "runtime/handles.inline.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/thread.inline.hpp" 41 #include "runtime/vmThread.hpp" 42 #include "utilities/macros.hpp" 43 #if INCLUDE_ALL_GCS 44 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp" 45 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp" 46 #endif // INCLUDE_ALL_GCS 47 48 // CollectorPolicy methods. 49 50 CollectorPolicy::CollectorPolicy() : 51 _space_alignment(0), 52 _heap_alignment(0), 53 _initial_heap_byte_size(InitialHeapSize), 54 _max_heap_byte_size(MaxHeapSize), 55 _min_heap_byte_size(Arguments::min_heap_size()), 56 _max_heap_size_cmdline(false), 57 _size_policy(NULL), 58 _should_clear_all_soft_refs(false), 59 _all_soft_refs_clear(false) 60 {} 61 62 void CollectorPolicy::assert_flags() { 63 assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes"); 64 assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment"); 65 assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment"); 66 } 67 68 void CollectorPolicy::assert_size_info() { 69 assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage"); 70 assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage"); 71 assert(_max_heap_byte_size >= _min_heap_byte_size, 
"Ergonomics decided on incompatible minimum and maximum heap sizes"); 72 assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes"); 73 assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes"); 74 assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment"); 75 assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment"); 76 assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment"); 77 } 78 79 void CollectorPolicy::initialize_alignments() { 80 _space_alignment = (uintx) Generation::GenGrain; 81 _heap_alignment = compute_heap_alignment(); 82 } 83 84 void CollectorPolicy::initialize_flags() { 85 assert(_space_alignment != 0, "Space alignment not set up properly"); 86 assert(_heap_alignment != 0, "Heap alignment not set up properly"); 87 assert(_heap_alignment >= _space_alignment, 88 err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT, 89 _heap_alignment, _space_alignment)); 90 assert(_heap_alignment % _space_alignment == 0, 91 err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT, 92 _heap_alignment, _space_alignment)); 93 94 if (FLAG_IS_CMDLINE(MaxHeapSize)) { 95 if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { 96 vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size"); 97 } 98 if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) { 99 vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); 100 } 101 _max_heap_size_cmdline = true; 102 } 103 104 // Check heap parameter properties 105 if (InitialHeapSize < M) { 106 vm_exit_during_initialization("Too small initial heap"); 107 } 108 if (_min_heap_byte_size < M) { 109 vm_exit_during_initialization("Too small minimum heap"); 110 } 111 112 // 
User inputs from -Xmx and -Xms must be aligned 113 _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment); 114 uintx alignedInitialHeapSize = align_size_up(InitialHeapSize, _heap_alignment); 115 uintx alignedMaxHeapSize = align_size_up(MaxHeapSize, _heap_alignment); 116 117 // Write back to flags if the values changed 118 if (alignedInitialHeapSize != InitialHeapSize) { 119 FLAG_SET_ERGO(uintx, InitialHeapSize, alignedInitialHeapSize); 120 } 121 if (alignedMaxHeapSize != MaxHeapSize) { 122 FLAG_SET_ERGO(uintx, MaxHeapSize, alignedMaxHeapSize); 123 } 124 125 if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 && 126 InitialHeapSize < _min_heap_byte_size) { 127 vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); 128 } 129 if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { 130 FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize); 131 } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) { 132 FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize); 133 if (InitialHeapSize < _min_heap_byte_size) { 134 _min_heap_byte_size = InitialHeapSize; 135 } 136 } 137 138 _initial_heap_byte_size = InitialHeapSize; 139 _max_heap_byte_size = MaxHeapSize; 140 141 FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment)); 142 143 CollectorPolicy::assert_flags(); 144 } 145 146 void CollectorPolicy::initialize_size_info() { 147 if (PrintGCDetails && Verbose) { 148 gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " 149 SIZE_FORMAT " Maximum heap " SIZE_FORMAT, 150 _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size); 151 } 152 153 CollectorPolicy::assert_size_info(); 154 } 155 156 bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) { 157 bool result = _should_clear_all_soft_refs; 158 set_should_clear_all_soft_refs(false); 159 return result; 160 } 161 162 GenRemSet* CollectorPolicy::create_rem_set(MemRegion 
whole_heap, 163 int max_covered_regions) { 164 return new CardTableRS(whole_heap, max_covered_regions); 165 } 166 167 void CollectorPolicy::cleared_all_soft_refs() { 168 // If near gc overhear limit, continue to clear SoftRefs. SoftRefs may 169 // have been cleared in the last collection but if the gc overhear 170 // limit continues to be near, SoftRefs should still be cleared. 171 if (size_policy() != NULL) { 172 _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near(); 173 } 174 _all_soft_refs_clear = true; 175 } 176 177 size_t CollectorPolicy::compute_heap_alignment() { 178 // The card marking array and the offset arrays for old generations are 179 // committed in os pages as well. Make sure they are entirely full (to 180 // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1 181 // byte entry and the os page size is 4096, the maximum heap size should 182 // be 512*4096 = 2MB aligned. 183 184 // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable 185 // is supported. 186 // Requirements of any new remembered set implementations must be added here. 187 size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); 188 189 // Parallel GC does its own alignment of the generations to avoid requiring a 190 // large page (256M on some platforms) for the permanent generation. The 191 // other collectors should also be updated to do their own alignment and then 192 // this use of lcm() should be removed. 193 if (UseLargePages && !UseParallelGC) { 194 // in presence of large pages we have to make sure that our 195 // alignment is large page aware 196 alignment = lcm(os::large_page_size(), alignment); 197 } 198 199 return alignment; 200 } 201 202 // GenCollectorPolicy methods. 
203 204 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { 205 return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment); 206 } 207 208 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, 209 size_t maximum_size) { 210 size_t max_minus = maximum_size - _gen_alignment; 211 return desired_size < max_minus ? desired_size : max_minus; 212 } 213 214 215 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, 216 size_t init_promo_size, 217 size_t init_survivor_size) { 218 const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0; 219 _size_policy = new AdaptiveSizePolicy(init_eden_size, 220 init_promo_size, 221 init_survivor_size, 222 max_gc_pause_sec, 223 GCTimeRatio); 224 } 225 226 size_t GenCollectorPolicy::young_gen_size_lower_bound() { 227 // The young generation must be aligned and have room for eden + two survivors 228 return align_size_up(3 * _space_alignment, _gen_alignment); 229 } 230 231 void GenCollectorPolicy::assert_flags() { 232 CollectorPolicy::assert_flags(); 233 assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size"); 234 assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 235 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); 236 assert(NewSize % _gen_alignment == 0, "NewSize alignment"); 237 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment"); 238 } 239 240 void TwoGenerationCollectorPolicy::assert_flags() { 241 GenCollectorPolicy::assert_flags(); 242 assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes"); 243 assert(OldSize % _gen_alignment == 0, "OldSize alignment"); 244 } 245 246 void GenCollectorPolicy::assert_size_info() { 247 CollectorPolicy::assert_size_info(); 248 // 
GenCollectorPolicy::initialize_size_info may update the MaxNewSize 249 assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); 250 assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage"); 251 assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage"); 252 assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); 253 assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 254 assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment"); 255 assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment"); 256 assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment"); 257 } 258 259 void TwoGenerationCollectorPolicy::assert_size_info() { 260 GenCollectorPolicy::assert_size_info(); 261 assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage"); 262 assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes"); 263 assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes"); 264 assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment"); 265 assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment"); 266 assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes"); 267 } 268 269 void GenCollectorPolicy::initialize_alignments() { 270 CollectorPolicy::initialize_alignments(); 271 _gen_alignment = default_gen_alignment(); 272 } 273 274 void GenCollectorPolicy::initialize_flags() { 275 CollectorPolicy::initialize_flags(); 276 277 assert(_gen_alignment != 0, "Generation alignment not set up properly"); 278 assert(_heap_alignment >= _gen_alignment, 
279 err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT, 280 _heap_alignment, _gen_alignment)); 281 assert(_gen_alignment % _space_alignment == 0, 282 err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT, 283 _gen_alignment, _space_alignment)); 284 285 // All generational heaps have a youngest gen; handle those flags here 286 if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) { 287 vm_exit_during_initialization("Initial young gen size set larger than the maximum young gen size"); 288 } 289 290 // Make sure the heap is large enough for two generations 291 uintx smallestNewSize = young_gen_size_lower_bound(); 292 uintx smallestHeapSize = align_size_up(smallestNewSize + align_size_up(_space_alignment, _gen_alignment), 293 _heap_alignment); 294 if (MaxHeapSize < smallestHeapSize) { 295 FLAG_SET_ERGO(uintx, MaxHeapSize, smallestHeapSize); 296 _max_heap_byte_size = MaxHeapSize; 297 } 298 // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size 299 if (_min_heap_byte_size < smallestHeapSize) { 300 _min_heap_byte_size = smallestHeapSize; 301 if (InitialHeapSize < _min_heap_byte_size) { 302 FLAG_SET_ERGO(uintx, InitialHeapSize, smallestHeapSize); 303 _initial_heap_byte_size = smallestHeapSize; 304 } 305 } 306 307 // Now take the actual NewSize into account. We will silently increase NewSize 308 // if the user specified a smaller value. 
309 smallestNewSize = MAX2(smallestNewSize, (uintx)align_size_down(NewSize, _gen_alignment)); 310 if (smallestNewSize != NewSize) { 311 FLAG_SET_ERGO(uintx, NewSize, smallestNewSize); 312 } 313 _initial_gen0_size = NewSize; 314 315 if (!FLAG_IS_DEFAULT(MaxNewSize)) { 316 uintx minNewSize = MAX2(_gen_alignment, _min_gen0_size); 317 318 if (MaxNewSize >= MaxHeapSize) { 319 // Make sure there is room for an old generation 320 uintx smallerMaxNewSize = MaxHeapSize - _gen_alignment; 321 if (FLAG_IS_CMDLINE(MaxNewSize)) { 322 warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire " 323 "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.", 324 MaxNewSize/K, MaxHeapSize/K, smallerMaxNewSize/K); 325 } 326 FLAG_SET_ERGO(uintx, MaxNewSize, smallerMaxNewSize); 327 if (NewSize > MaxNewSize) { 328 FLAG_SET_ERGO(uintx, NewSize, MaxNewSize); 329 _initial_gen0_size = NewSize; 330 } 331 } else if (MaxNewSize < minNewSize) { 332 FLAG_SET_ERGO(uintx, MaxNewSize, minNewSize); 333 } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) { 334 FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment)); 335 } 336 _max_gen0_size = MaxNewSize; 337 } 338 339 if (NewSize > MaxNewSize) { 340 // At this point this should only happen if the user specifies a large NewSize or 341 // a small (but not too small) MaxNewSize. 342 if (FLAG_IS_CMDLINE(NewSize)) { 343 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
" 344 "A new generation size of " SIZE_FORMAT "k will be used.", 345 NewSize/K, MaxNewSize/K, MaxNewSize/K); 346 } 347 FLAG_SET_ERGO(uintx, NewSize, MaxNewSize); 348 _initial_gen0_size = NewSize; 349 } 350 351 if (SurvivorRatio < 1 || NewRatio < 1) { 352 vm_exit_during_initialization("Invalid young gen ratio specified"); 353 } 354 355 GenCollectorPolicy::assert_flags(); 356 } 357 358 void TwoGenerationCollectorPolicy::initialize_flags() { 359 GenCollectorPolicy::initialize_flags(); 360 361 if (!is_size_aligned(OldSize, _gen_alignment)) { 362 FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment)); 363 } 364 365 if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) { 366 // NewRatio will be used later to set the young generation size so we use 367 // it to calculate how big the heap should be based on the requested OldSize 368 // and NewRatio. 369 assert(NewRatio > 0, "NewRatio should have been set up earlier"); 370 size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); 371 372 calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment); 373 FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize); 374 _max_heap_byte_size = MaxHeapSize; 375 FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize); 376 _initial_heap_byte_size = InitialHeapSize; 377 } 378 379 // adjust max heap size if necessary 380 if (NewSize + OldSize > MaxHeapSize) { 381 if (_max_heap_size_cmdline) { 382 // somebody set a maximum heap size with the intention that we should not 383 // exceed it. Adjust New/OldSize as necessary. 
384 uintx calculated_size = NewSize + OldSize; 385 double shrink_factor = (double) MaxHeapSize / calculated_size; 386 uintx smallerNewSize = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment); 387 FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smallerNewSize)); 388 _initial_gen0_size = NewSize; 389 390 // OldSize is already aligned because above we aligned MaxHeapSize to 391 // _heap_alignment, and we just made sure that NewSize is aligned to 392 // _gen_alignment. In initialize_flags() we verified that _heap_alignment 393 // is a multiple of _gen_alignment. 394 FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize); 395 } else { 396 FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment)); 397 _max_heap_byte_size = MaxHeapSize; 398 } 399 } 400 401 always_do_update_barrier = UseConcMarkSweepGC; 402 TwoGenerationCollectorPolicy::assert_flags(); 403 } 404 405 // Values set on the command line win over any ergonomically 406 // set command line parameters. 407 // Ergonomic choice of parameters are done before this 408 // method is called. Values for command line parameters such as NewSize 409 // and MaxNewSize feed those ergonomic choices into this method. 410 // This method makes the final generation sizings consistent with 411 // themselves and with overall heap sizings. 412 // In the absence of explicitly set command line flags, policies 413 // such as the use of NewRatio are used to size the generation. 414 void GenCollectorPolicy::initialize_size_info() { 415 CollectorPolicy::initialize_size_info(); 416 417 // _space_alignment is used for alignment within a generation. 418 // There is additional alignment done down stream for some 419 // collectors that sometimes causes unwanted rounding up of 420 // generations sizes. 
421 422 // Determine maximum size of gen0 423 424 size_t max_new_size = 0; 425 if (!FLAG_IS_DEFAULT(MaxNewSize)) { 426 max_new_size = MaxNewSize; 427 } else { 428 max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size); 429 // Bound the maximum size by NewSize below (since it historically 430 // would have been NewSize and because the NewRatio calculation could 431 // yield a size that is too small) and bound it by MaxNewSize above. 432 // Ergonomics plays here by previously calculating the desired 433 // NewSize and MaxNewSize. 434 max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize); 435 } 436 assert(max_new_size > 0, "All paths should set max_new_size"); 437 438 // Given the maximum gen0 size, determine the initial and 439 // minimum gen0 sizes. 440 441 if (_max_heap_byte_size == _min_heap_byte_size) { 442 // The maximum and minimum heap sizes are the same so 443 // the generations minimum and initial must be the 444 // same as its maximum. 445 _min_gen0_size = max_new_size; 446 _initial_gen0_size = max_new_size; 447 _max_gen0_size = max_new_size; 448 } else { 449 size_t desired_new_size = 0; 450 if (!FLAG_IS_DEFAULT(NewSize)) { 451 // If NewSize is set ergonomically (for example by cms), it 452 // would make sense to use it. If it is used, also use it 453 // to set the initial size. Although there is no reason 454 // the minimum size and the initial size have to be the same, 455 // the current implementation gets into trouble during the calculation 456 // of the tenured generation sizes if they are different. 457 // Note that this makes the initial size and the minimum size 458 // generally small compared to the NewRatio calculation. 459 _min_gen0_size = NewSize; 460 desired_new_size = NewSize; 461 max_new_size = MAX2(max_new_size, NewSize); 462 } else { 463 // For the case where NewSize is the default, use NewRatio 464 // to size the minimum and initial generation sizes. 465 // Use the default NewSize as the floor for these values. 
If 466 // NewRatio is overly large, the resulting sizes can be too 467 // small. 468 _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize); 469 desired_new_size = 470 MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize); 471 } 472 473 assert(_min_gen0_size > 0, "Sanity check"); 474 _initial_gen0_size = desired_new_size; 475 _max_gen0_size = max_new_size; 476 477 // At this point the desirable initial and minimum sizes have been 478 // determined without regard to the maximum sizes. 479 480 // Bound the sizes by the corresponding overall heap sizes. 481 _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size); 482 _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size); 483 _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size); 484 485 // At this point all three sizes have been checked against the 486 // maximum sizes but have not been checked for consistency 487 // among the three. 488 489 // Final check min <= initial <= max 490 _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size); 491 _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size); 492 _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size); 493 } 494 495 // Write back to flags if necessary 496 if (NewSize != _initial_gen0_size) { 497 FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size); 498 } 499 500 if (MaxNewSize != _max_gen0_size) { 501 FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); 502 } 503 504 if (PrintGCDetails && Verbose) { 505 gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 506 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 507 _min_gen0_size, _initial_gen0_size, _max_gen0_size); 508 } 509 510 GenCollectorPolicy::assert_size_info(); 511 } 512 513 // Call this method during the sizing of the gen1 to make 514 // adjustments to gen0 because of gen1 sizing policy. 
gen0 initially has 515 // the most freedom in sizing because it is done before the 516 // policy for gen1 is applied. Once gen1 policies have been applied, 517 // there may be conflicts in the shape of the heap and this method 518 // is used to make the needed adjustments. The application of the 519 // policies could be more sophisticated (iterative for example) but 520 // keeping it simple also seems a worthwhile goal. 521 bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr, 522 size_t* gen1_size_ptr, 523 const size_t heap_size, 524 const size_t min_gen1_size) { 525 bool result = false; 526 527 if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) { 528 uintx smallestNewSize = young_gen_size_lower_bound(); 529 if ((heap_size < (*gen0_size_ptr + min_gen1_size)) && 530 (heap_size >= min_gen1_size + smallestNewSize)) { 531 // Adjust gen0 down to accommodate min_gen1_size 532 *gen0_size_ptr = align_size_down_bounded(heap_size - min_gen1_size, _gen_alignment); 533 assert(*gen0_size_ptr > 0, "Min gen0 is too large"); 534 result = true; 535 } else { 536 *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment); 537 } 538 } 539 return result; 540 } 541 542 // Minimum sizes of the generations may be different than 543 // the initial sizes. An inconsistently is permitted here 544 // in the total size that can be specified explicitly by 545 // command line specification of OldSize and NewSize and 546 // also a command line specification of -Xms. Issue a warning 547 // but allow the values to pass. 548 549 void TwoGenerationCollectorPolicy::initialize_size_info() { 550 GenCollectorPolicy::initialize_size_info(); 551 552 // At this point the minimum, initial and maximum sizes 553 // of the overall heap and of gen0 have been determined. 554 // The maximum gen1 size can be determined from the maximum gen0 555 // and maximum heap size since no explicit flags exits 556 // for setting the gen1 maximum. 
557 _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment); 558 559 // If no explicit command line flag has been set for the 560 // gen1 size, use what is left for gen1. 561 if (!FLAG_IS_CMDLINE(OldSize)) { 562 // The user has not specified any value but the ergonomics 563 // may have chosen a value (which may or may not be consistent 564 // with the overall heap size). In either case make 565 // the minimum, maximum and initial sizes consistent 566 // with the gen0 sizes and the overall heap sizes. 567 _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment); 568 _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment); 569 // _max_gen1_size has already been made consistent above 570 FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); 571 } else { 572 // It's been explicitly set on the command line. Use the 573 // OldSize and then determine the consequences. 574 _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size); 575 _initial_gen1_size = OldSize; 576 577 // If the user has explicitly set an OldSize that is inconsistent 578 // with other command line flags, issue a warning. 579 // The generation minimums and the overall heap mimimum should 580 // be within one generation alignment. 581 if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) { 582 warning("Inconsistency between minimum heap size and minimum " 583 "generation sizes: using minimum heap = " SIZE_FORMAT, 584 _min_heap_byte_size); 585 } 586 if (OldSize > _max_gen1_size) { 587 warning("Inconsistency between maximum heap size and maximum " 588 "generation sizes: using maximum heap = " SIZE_FORMAT 589 " -XX:OldSize flag is being ignored", 590 _max_heap_byte_size); 591 } 592 // If there is an inconsistency between the OldSize and the minimum and/or 593 // initial size of gen0, since OldSize was explicitly set, OldSize wins. 
594 if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, 595 _min_heap_byte_size, _min_gen1_size)) { 596 if (PrintGCDetails && Verbose) { 597 gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 598 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 599 _min_gen0_size, _initial_gen0_size, _max_gen0_size); 600 } 601 } 602 // Initial size 603 if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, 604 _initial_heap_byte_size, _initial_gen1_size)) { 605 if (PrintGCDetails && Verbose) { 606 gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 607 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 608 _min_gen0_size, _initial_gen0_size, _max_gen0_size); 609 } 610 } 611 } 612 // Enforce the maximum gen1 size. 613 _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size); 614 615 // Check that min gen1 <= initial gen1 <= max gen1 616 _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size); 617 _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size); 618 619 // Write back to flags if necessary 620 if (NewSize != _initial_gen0_size) { 621 FLAG_SET_ERGO(uintx, NewSize, _max_gen0_size); 622 } 623 624 if (MaxNewSize != _max_gen0_size) { 625 FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); 626 } 627 628 if (OldSize != _initial_gen1_size) { 629 FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); 630 } 631 632 if (PrintGCDetails && Verbose) { 633 gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " 634 SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, 635 _min_gen1_size, _initial_gen1_size, _max_gen1_size); 636 } 637 638 TwoGenerationCollectorPolicy::assert_size_info(); 639 } 640 641 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, 642 bool is_tlab, 643 bool* gc_overhead_limit_was_exceeded) { 644 GenCollectedHeap *gch = GenCollectedHeap::heap(); 645 646 debug_only(gch->check_for_valid_allocation_state()); 647 assert(gch->no_gc_in_progress(), "Allocation during gc not allowed"); 648 649 // In general 
gc_overhead_limit_was_exceeded should be false so 650 // set it so here and reset it to true only if the gc time 651 // limit is being exceeded as checked below. 652 *gc_overhead_limit_was_exceeded = false; 653 654 HeapWord* result = NULL; 655 656 // Loop until the allocation is satisified, 657 // or unsatisfied after GC. 658 for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { 659 HandleMark hm; // discard any handles allocated in each iteration 660 661 // First allocation attempt is lock-free. 662 Generation *gen0 = gch->get_gen(0); 663 assert(gen0->supports_inline_contig_alloc(), 664 "Otherwise, must do alloc within heap lock"); 665 if (gen0->should_allocate(size, is_tlab)) { 666 result = gen0->par_allocate(size, is_tlab); 667 if (result != NULL) { 668 assert(gch->is_in_reserved(result), "result not in heap"); 669 return result; 670 } 671 } 672 unsigned int gc_count_before; // read inside the Heap_lock locked region 673 { 674 MutexLocker ml(Heap_lock); 675 if (PrintGC && Verbose) { 676 gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:" 677 " attempting locked slow path allocation"); 678 } 679 // Note that only large objects get a shot at being 680 // allocated in later generations. 681 bool first_only = ! 
should_try_older_generation_allocation(size); 682 683 result = gch->attempt_allocation(size, is_tlab, first_only); 684 if (result != NULL) { 685 assert(gch->is_in_reserved(result), "result not in heap"); 686 return result; 687 } 688 689 if (GC_locker::is_active_and_needs_gc()) { 690 if (is_tlab) { 691 return NULL; // Caller will retry allocating individual object 692 } 693 if (!gch->is_maximal_no_gc()) { 694 // Try and expand heap to satisfy request 695 result = expand_heap_and_allocate(size, is_tlab); 696 // result could be null if we are out of space 697 if (result != NULL) { 698 return result; 699 } 700 } 701 702 if (gclocker_stalled_count > GCLockerRetryAllocationCount) { 703 return NULL; // we didn't get to do a GC and we didn't get any memory 704 } 705 706 // If this thread is not in a jni critical section, we stall 707 // the requestor until the critical section has cleared and 708 // GC allowed. When the critical section clears, a GC is 709 // initiated by the last thread exiting the critical section; so 710 // we retry the allocation sequence from the beginning of the loop, 711 // rather than causing more, now probably unnecessary, GC attempts. 712 JavaThread* jthr = JavaThread::current(); 713 if (!jthr->in_critical()) { 714 MutexUnlocker mul(Heap_lock); 715 // Wait for JNI critical section to be exited 716 GC_locker::stall_until_clear(); 717 gclocker_stalled_count += 1; 718 continue; 719 } else { 720 if (CheckJNICalls) { 721 fatal("Possible deadlock due to allocating while" 722 " in jni critical section"); 723 } 724 return NULL; 725 } 726 } 727 728 // Read the gc count while the heap lock is held. 
729 gc_count_before = Universe::heap()->total_collections(); 730 } 731 732 VM_GenCollectForAllocation op(size, is_tlab, gc_count_before); 733 VMThread::execute(&op); 734 if (op.prologue_succeeded()) { 735 result = op.result(); 736 if (op.gc_locked()) { 737 assert(result == NULL, "must be NULL if gc_locked() is true"); 738 continue; // retry and/or stall as necessary 739 } 740 741 // Allocation has failed and a collection 742 // has been done. If the gc time limit was exceeded the 743 // this time, return NULL so that an out-of-memory 744 // will be thrown. Clear gc_overhead_limit_exceeded 745 // so that the overhead exceeded does not persist. 746 747 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); 748 const bool softrefs_clear = all_soft_refs_clear(); 749 750 if (limit_exceeded && softrefs_clear) { 751 *gc_overhead_limit_was_exceeded = true; 752 size_policy()->set_gc_overhead_limit_exceeded(false); 753 if (op.result() != NULL) { 754 CollectedHeap::fill_with_object(op.result(), size); 755 } 756 return NULL; 757 } 758 assert(result == NULL || gch->is_in_reserved(result), 759 "result not in heap"); 760 return result; 761 } 762 763 // Give a warning if we seem to be looping forever. 764 if ((QueuedAllocationWarningCount > 0) && 765 (try_count % QueuedAllocationWarningCount == 0)) { 766 warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t" 767 " size=%d %s", try_count, size, is_tlab ? 
"(TLAB)" : "");
    }
  }
}

// Last-ditch expansion path: walk the generations from oldest to youngest
// and ask each one that is willing to take an allocation of this size
// (and kind) to expand and allocate.  Returns NULL if no generation could
// satisfy the request even after expansion.
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  // Oldest generation first: expansion there is preferred over growing gen0.
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

// Called (at a safepoint) after a normal allocation attempt has failed.
// Escalates through: heap expansion (when a GC is locked out), an
// incremental collection, a full collection, another expansion attempt,
// and finally a maximally-compacting full collection that also clears
// soft references.  Returns the allocated space, or NULL to signal that
// an out-of-memory condition should be raised by the caller.
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  // Retry the allocation now that a collection (or expansion) has run.
  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Slow path for metadata (Metaspace) allocation failure.  Loops until
// either the allocation succeeds, this thread must give up (deadlock
// avoidance inside a JNI critical section), or a GC has been done and
// the allocation still fails (NULL return).
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
925 if (op.gc_locked()) { 926 continue; 927 } 928 929 if (op.prologue_succeeded()) { 930 return op.result(); 931 } 932 loop_count++; 933 if ((QueuedAllocationWarningCount > 0) && 934 (loop_count % QueuedAllocationWarningCount == 0)) { 935 warning("satisfy_failed_metadata_allocation() retries %d times \n\t" 936 " size=%d", loop_count, word_size); 937 } 938 } while (true); // Until a GC is done 939 } 940 941 // Return true if any of the following is true: 942 // . the allocation won't fit into the current young gen heap 943 // . gc locker is occupied (jni critical section) 944 // . heap memory is tight -- the most recent previous collection 945 // was a full collection because a partial collection (would 946 // have) failed and is likely to fail again 947 bool GenCollectorPolicy::should_try_older_generation_allocation( 948 size_t word_size) const { 949 GenCollectedHeap* gch = GenCollectedHeap::heap(); 950 size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc(); 951 return (word_size > heap_word_size(gen0_capacity)) 952 || GC_locker::is_active_and_needs_gc() 953 || gch->incremental_collection_failed(); 954 } 955 956 957 // 958 // MarkSweepPolicy methods 959 // 960 961 void MarkSweepPolicy::initialize_generations() { 962 _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL); 963 if (_generations == NULL) { 964 vm_exit_during_initialization("Unable to allocate gen spec"); 965 } 966 967 if (UseParNewGC) { 968 _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size); 969 } else { 970 _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size); 971 } 972 _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size); 973 974 if (_generations[0] == NULL || _generations[1] == NULL) { 975 vm_exit_during_initialization("Unable to allocate gen spec"); 976 } 977 } 978 979 void 
MarkSweepPolicy::initialize_gc_policy_counters() { 980 // initialize the policy counters - 2 collectors, 3 generations 981 if (UseParNewGC) { 982 _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3); 983 } else { 984 _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3); 985 } 986 }