1 /* 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/shared/adaptiveSizePolicy.hpp" 27 #include "gc_implementation/shared/gcPolicyCounters.hpp" 28 #include "gc_implementation/shared/vmGCOperations.hpp" 29 #include "memory/cardTableRS.hpp" 30 #include "memory/collectorPolicy.hpp" 31 #include "memory/gcLocker.inline.hpp" 32 #include "memory/genCollectedHeap.hpp" 33 #include "memory/generationSpec.hpp" 34 #include "memory/space.hpp" 35 #include "memory/universe.hpp" 36 #include "runtime/arguments.hpp" 37 #include "runtime/globals_extension.hpp" 38 #include "runtime/handles.inline.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/thread.inline.hpp" 41 #include "runtime/vmThread.hpp" 42 #include "utilities/macros.hpp" 43 #if INCLUDE_ALL_GCS 44 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp" 45 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp" 46 #endif // INCLUDE_ALL_GCS 47 48 // CollectorPolicy methods. 49 50 // Align down. If the aligning result in 0, return 'alignment'. 
51 static size_t restricted_align_down(size_t size, size_t alignment) { 52 return MAX2(alignment, align_size_down_(size, alignment)); 53 } 54 55 void CollectorPolicy::initialize_flags() { 56 assert(_max_alignment >= _min_alignment, 57 err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, 58 _max_alignment, _min_alignment)); 59 assert(_max_alignment % _min_alignment == 0, 60 err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, 61 _max_alignment, _min_alignment)); 62 63 if (FLAG_IS_CMDLINE(MaxHeapSize)) { 64 if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { 65 vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); 66 } 67 if (Arguments::min_heap_size() != 0 && MaxHeapSize < Arguments::min_heap_size()) { 68 vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); 69 } 70 _max_heap_size_cmdline = true; 71 } 72 73 if (FLAG_IS_CMDLINE(InitialHeapSize) && Arguments::min_heap_size() != 0 && 74 InitialHeapSize < Arguments::min_heap_size()) { 75 vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); 76 } 77 if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { 78 FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize); 79 } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) { 80 FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize); 81 if (InitialHeapSize < Arguments::min_heap_size()) { 82 Arguments::set_min_heap_size(InitialHeapSize); 83 } 84 } 85 86 // User inputs from -Xmx and -Xms must be aligned 87 Arguments::set_min_heap_size(align_size_up(Arguments::min_heap_size(), _min_alignment)); 88 uintx alignedInitialHeapSize = align_size_up(InitialHeapSize, _min_alignment); 89 uintx alignedMaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); 90 91 // Write back to flags if the values was changed 92 if (alignedInitialHeapSize != InitialHeapSize) { 93 
FLAG_SET_ERGO(uintx, InitialHeapSize, alignedInitialHeapSize); 94 } 95 if (alignedMaxHeapSize != MaxHeapSize) { 96 FLAG_SET_ERGO(uintx, MaxHeapSize, alignedMaxHeapSize); 97 } 98 99 assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes"); 100 assert(InitialHeapSize % _min_alignment == 0, "InitialHeapSize alignment"); 101 assert(MaxHeapSize % _max_alignment == 0, "MaxHeapSize alignment"); 102 103 if (!is_size_aligned(MaxMetaspaceSize, _max_alignment)) { 104 FLAG_SET_ERGO(uintx, MaxMetaspaceSize, 105 restricted_align_down(MaxMetaspaceSize, _max_alignment)); 106 } 107 108 if (MetaspaceSize > MaxMetaspaceSize) { 109 FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize); 110 } 111 112 if (!is_size_aligned(MetaspaceSize, _min_alignment)) { 113 FLAG_SET_ERGO(uintx, MetaspaceSize, 114 restricted_align_down(MetaspaceSize, _min_alignment)); 115 } 116 117 assert(MetaspaceSize <= MaxMetaspaceSize, "Must be"); 118 119 MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _min_alignment); 120 MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _min_alignment); 121 122 MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment); 123 124 assert(MetaspaceSize % _min_alignment == 0, "metapace alignment"); 125 assert(MaxMetaspaceSize % _max_alignment == 0, "maximum metaspace alignment"); 126 if (MetaspaceSize < 256*K) { 127 vm_exit_during_initialization("Too small initial Metaspace size"); 128 } 129 } 130 131 void CollectorPolicy::initialize_size_info() { 132 _min_heap_byte_size = Arguments::min_heap_size(); 133 _initial_heap_byte_size = InitialHeapSize; 134 _max_heap_byte_size = MaxHeapSize; 135 136 // Check heap parameter properties 137 if (_initial_heap_byte_size < M) { 138 vm_exit_during_initialization("Too small initial heap"); 139 } 140 // Check heap parameter properties 141 if (_min_heap_byte_size < M) { 142 vm_exit_during_initialization("Too small minimum heap"); 143 } 144 145 
assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes"); 146 assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes"); 147 assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes"); 148 assert(_min_heap_byte_size % _min_alignment == 0, "min_heap_byte_size alignment"); 149 assert(_initial_heap_byte_size % _min_alignment == 0, "initial_heap_byte_size alignment"); 150 assert(_max_heap_byte_size % _max_alignment == 0, "max_heap_byte_size alignment"); 151 152 if (PrintGCDetails && Verbose) { 153 gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " 154 SIZE_FORMAT " Maximum heap " SIZE_FORMAT, 155 _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size); 156 } 157 } 158 159 bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) { 160 bool result = _should_clear_all_soft_refs; 161 set_should_clear_all_soft_refs(false); 162 return result; 163 } 164 165 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, 166 int max_covered_regions) { 167 switch (rem_set_name()) { 168 case GenRemSet::CardTable: { 169 CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions); 170 return res; 171 } 172 default: 173 guarantee(false, "unrecognized GenRemSet::Name"); 174 return NULL; 175 } 176 } 177 178 void CollectorPolicy::cleared_all_soft_refs() { 179 // If near gc overhear limit, continue to clear SoftRefs. SoftRefs may 180 // have been cleared in the last collection but if the gc overhear 181 // limit continues to be near, SoftRefs should still be cleared. 
182 if (size_policy() != NULL) { 183 _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near(); 184 } 185 _all_soft_refs_clear = true; 186 } 187 188 size_t CollectorPolicy::compute_max_alignment() { 189 // The card marking array and the offset arrays for old generations are 190 // committed in os pages as well. Make sure they are entirely full (to 191 // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1 192 // byte entry and the os page size is 4096, the maximum heap size should 193 // be 512*4096 = 2MB aligned. 194 195 // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable 196 // is supported. 197 // Requirements of any new remembered set implementations must be added here. 198 size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); 199 200 // Parallel GC does its own alignment of the generations to avoid requiring a 201 // large page (256M on some platforms) for the permanent generation. The 202 // other collectors should also be updated to do their own alignment and then 203 // this use of lcm() should be removed. 204 if (UseLargePages && !UseParallelGC) { 205 // in presence of large pages we have to make sure that our 206 // alignment is large page aware 207 alignment = lcm(os::large_page_size(), alignment); 208 } 209 210 return alignment; 211 } 212 213 // GenCollectorPolicy methods. 214 215 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { 216 size_t x = base_size / (NewRatio+1); 217 size_t new_gen_size = x > _min_alignment ? 218 align_size_down(x, _min_alignment) : _min_alignment; 219 return new_gen_size; 220 } 221 222 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, 223 size_t maximum_size) { 224 size_t alignment = _min_alignment; 225 size_t max_minus = maximum_size - alignment; 226 return desired_size < max_minus ? 
desired_size : max_minus; 227 } 228 229 230 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, 231 size_t init_promo_size, 232 size_t init_survivor_size) { 233 const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; 234 _size_policy = new AdaptiveSizePolicy(init_eden_size, 235 init_promo_size, 236 init_survivor_size, 237 max_gc_pause_sec, 238 GCTimeRatio); 239 } 240 241 size_t GenCollectorPolicy::compute_max_alignment() { 242 // The card marking array and the offset arrays for old generations are 243 // committed in os pages as well. Make sure they are entirely full (to 244 // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1 245 // byte entry and the os page size is 4096, the maximum heap size should 246 // be 512*4096 = 2MB aligned. 247 size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name()); 248 249 // Parallel GC does its own alignment of the generations to avoid requiring a 250 // large page (256M on some platforms) for the permanent generation. The 251 // other collectors should also be updated to do their own alignment and then 252 // this use of lcm() should be removed. 253 if (UseLargePages && !UseParallelGC) { 254 // in presence of large pages we have to make sure that our 255 // alignment is large page aware 256 alignment = lcm(os::large_page_size(), alignment); 257 } 258 259 assert(alignment >= _min_alignment, "Must be"); 260 261 return alignment; 262 } 263 264 void GenCollectorPolicy::initialize_flags() { 265 // All sizes must be multiples of the generation granularity. 266 _min_alignment = (uintx) Generation::GenGrain; 267 _max_alignment = compute_max_alignment(); 268 269 CollectorPolicy::initialize_flags(); 270 271 // This is the absolute minimum for the young generation. It has to hold two 272 // survivor areas and the eden. We set it here since it is used repeatedly 273 // throughout the initialization. However this is not necessarily the final 274 // value of _min_gen0_size. 
275 _min_gen0_size = 3 * intra_heap_alignment(); 276 277 // Make sure the heap is large enough for two generations. 278 uintx smallestHeapSize = _min_gen0_size + intra_heap_alignment(); 279 if (MaxHeapSize < smallestHeapSize) { 280 FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(smallestHeapSize, _max_alignment)); 281 } 282 283 // All generational heaps have a youngest gen; handle those flags here. 284 285 if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) { 286 vm_exit_during_initialization("Incompatible initial and maximum young gen sizes specified"); 287 } 288 289 if (!FLAG_IS_DEFAULT(MaxNewSize)) { 290 uintx minNewSize = MAX2(_min_alignment, _min_gen0_size); 291 292 if (MaxNewSize >= MaxHeapSize) { 293 uintx smallerMaxNewSize = align_size_down(MaxHeapSize - _min_alignment, _min_alignment); 294 if (FLAG_IS_CMDLINE(MaxNewSize)) { 295 warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire " 296 "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.", 297 MaxNewSize/K, MaxHeapSize/K, smallerMaxNewSize/K); 298 } 299 FLAG_SET_ERGO(uintx, MaxNewSize, smallerMaxNewSize); 300 if (NewSize > MaxNewSize) { 301 FLAG_SET_ERGO(uintx, NewSize, MaxNewSize); 302 } 303 } else if (MaxNewSize < minNewSize) { 304 FLAG_SET_ERGO(uintx, MaxNewSize, align_size_up(minNewSize, _min_alignment)); 305 } else if (!is_size_aligned(MaxNewSize, _min_alignment)) { 306 FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _min_alignment)); 307 } 308 } 309 310 // Young space must be aligned and have room for eden + two survivors. 311 // We will silently increase the NewSize even if the user specified a smaller value. 
312 uintx smallestNewSize = MAX2(align_size_up(_min_gen0_size, _min_alignment), 313 align_size_down(NewSize, _min_alignment)); 314 if (smallestNewSize != NewSize) { 315 FLAG_SET_ERGO(uintx, NewSize, smallestNewSize); 316 } 317 318 if (NewSize > MaxNewSize) { 319 // At this point this should only happen if the user specifies a large NewSize or 320 // a small (but not too small) MaxNewSize. 321 if (FLAG_IS_CMDLINE(NewSize)) { 322 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). " 323 "A new generation size of " SIZE_FORMAT "k will be used.", 324 NewSize/K, MaxNewSize/K, MaxNewSize/K); 325 } 326 FLAG_SET_ERGO(uintx, NewSize, MaxNewSize); 327 } 328 329 assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes"); 330 assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size"); 331 assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 332 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); 333 assert(InitialHeapSize % _min_alignment == 0, "InitialHeapSize alignment"); 334 assert(MaxHeapSize % _max_alignment == 0, "MaxHeapSize alignment"); 335 assert(NewSize % _min_alignment == 0, "NewSize alignment"); 336 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _min_alignment == 0, "MaxNewSize alignment"); 337 338 if (SurvivorRatio < 1 || NewRatio < 1) { 339 vm_exit_during_initialization("Invalid young gen ratio specified"); 340 } 341 } 342 343 void TwoGenerationCollectorPolicy::initialize_flags() { 344 GenCollectorPolicy::initialize_flags(); 345 346 if (!is_size_aligned(OldSize, _min_alignment)) { 347 FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _min_alignment)); 348 } 349 350 if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) { 351 // NewRatio will be used later to set the young generation size so we use 352 // 
it to calculate how big the heap should be based on the requested OldSize 353 // and NewRatio. 354 assert(NewRatio > 0, "NewRatio should have been set up earlier"); 355 size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); 356 357 calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment); 358 FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize); 359 FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize); 360 } 361 362 // adjust max heap size if necessary 363 if (NewSize + OldSize > MaxHeapSize) { 364 if (_max_heap_size_cmdline) { 365 // Somebody has set a maximum heap size with the intention that we should not 366 // exceed it. Adjust New/OldSize as necessary. 367 uintx calculated_size = NewSize + OldSize; 368 double shrink_factor = (double) MaxHeapSize / calculated_size; 369 // align 370 FLAG_SET_ERGO(uintx, NewSize, MAX2(_min_gen0_size, (uintx)align_size_down((uintx)(NewSize * shrink_factor), _min_alignment))); 371 372 // OldSize is already aligned because above we aligned MaxHeapSize to 373 // _max_alignment, and we just made sure that NewSize is aligned to 374 // _min_alignment. In initialize_flags() we verified that _max_alignment 375 // is a multiple of _min_alignment. 
376 FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize); 377 } else { 378 FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _max_alignment)); 379 } 380 } 381 382 always_do_update_barrier = UseConcMarkSweepGC; 383 384 // Check validity of heap flags 385 assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes"); 386 assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size"); 387 assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 388 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); 389 assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes"); 390 assert(InitialHeapSize % _min_alignment == 0, "InitialHeapSize alignment"); 391 assert(MaxHeapSize % _max_alignment == 0, "MaxHeapSize alignment"); 392 assert(NewSize % _min_alignment == 0, "NewSize alignment"); 393 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _min_alignment == 0, "MaxNewSize alignment"); 394 assert(OldSize % _min_alignment == 0, "OldSize alignment"); 395 } 396 397 // Values set on the command line win over any ergonomically 398 // set command line parameters. 399 // Ergonomic choice of parameters are done before this 400 // method is called. Values for command line parameters such as NewSize 401 // and MaxNewSize feed those ergonomic choices into this method. 402 // This method makes the final generation sizings consistent with 403 // themselves and with overall heap sizings. 404 // In the absence of explicitly set command line flags, policies 405 // such as the use of NewRatio are used to size the generation. 406 void GenCollectorPolicy::initialize_size_info() { 407 CollectorPolicy::initialize_size_info(); 408 409 // _min_alignment is used for alignment within a generation. 
410 // There is additional alignment done down stream for some 411 // collectors that sometimes causes unwanted rounding up of 412 // generations sizes. 413 414 // Determine maximum size of gen0 415 416 size_t max_new_size = 0; 417 if (!FLAG_IS_DEFAULT(MaxNewSize)) { 418 max_new_size = MaxNewSize; 419 } else { 420 max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size); 421 // Bound the maximum size by NewSize below (since it historically 422 // would have been NewSize and because the NewRatio calculation could 423 // yield a size that is too small) and bound it by MaxNewSize above. 424 // Ergonomics plays here by previously calculating the desired 425 // NewSize and MaxNewSize. 426 max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize); 427 } 428 assert(max_new_size > 0, "All paths should set max_new_size"); 429 430 // Given the maximum gen0 size, determine the initial and 431 // minimum gen0 sizes. 432 433 if (_max_heap_byte_size == _min_heap_byte_size) { 434 // The maximum and minimum heap sizes are the same so 435 // the generations minimum and initial must be the 436 // same as its maximum. 437 _min_gen0_size = max_new_size; 438 _initial_gen0_size = max_new_size; 439 _max_gen0_size = max_new_size; 440 } else { 441 size_t desired_new_size = 0; 442 if (!FLAG_IS_DEFAULT(NewSize)) { 443 // If NewSize is set ergonomically (for example by cms), it 444 // would make sense to use it. If it is used, also use it 445 // to set the initial size. Although there is no reason 446 // the minimum size and the initial size have to be the same, 447 // the current implementation gets into trouble during the calculation 448 // of the tenured generation sizes if they are different. 449 // Note that this makes the initial size and the minimum size 450 // generally small compared to the NewRatio calculation. 
451 _min_gen0_size = NewSize; 452 desired_new_size = NewSize; 453 max_new_size = MAX2(max_new_size, NewSize); 454 } else { 455 // For the case where NewSize is the default, use NewRatio 456 // to size the minimum and initial generation sizes. 457 // Use the default NewSize as the floor for these values. If 458 // NewRatio is overly large, the resulting sizes can be too 459 // small. 460 _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize); 461 desired_new_size = 462 MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize); 463 } 464 465 assert(_min_gen0_size > 0, "Sanity check"); 466 _initial_gen0_size = desired_new_size; 467 _max_gen0_size = max_new_size; 468 469 // At this point the desirable initial and minimum sizes have been 470 // determined without regard to the maximum sizes. 471 472 // Bound the sizes by the corresponding overall heap sizes. 473 _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size); 474 _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size); 475 _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size); 476 477 // At this point all three sizes have been checked against the 478 // maximum sizes but have not been checked for consistency 479 // among the three. 
480 481 // Final check min <= initial <= max 482 _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size); 483 _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size); 484 _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size); 485 } 486 487 // Write back to flag if necessary 488 if (MaxNewSize != _min_gen0_size) { 489 FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); 490 } 491 assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); 492 assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes"); 493 assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes"); 494 assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes"); 495 assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); 496 assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 497 assert(_min_heap_byte_size % _min_alignment == 0, "min_heap_byte_size alignment"); 498 assert(_initial_heap_byte_size % _min_alignment == 0, "initial_heap_byte_size alignment"); 499 assert(_max_heap_byte_size % _max_alignment == 0, "max_heap_byte_size alignment"); 500 assert(_min_gen0_size % _min_alignment == 0, "_min_gen0_size alignment"); 501 assert(_initial_gen0_size % _min_alignment == 0, "_initial_gen0_size alignment"); 502 assert(_max_gen0_size % _min_alignment == 0, "_max_gen0_size alignment"); 503 504 if (PrintGCDetails && Verbose) { 505 gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 506 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 507 _min_gen0_size, _initial_gen0_size, _max_gen0_size); 508 } 509 } 510 511 // Call this method during the sizing of the gen1 to make 512 // adjustments to gen0 because of gen1 sizing 
policy. gen0 initially has 513 // the most freedom in sizing because it is done before the 514 // policy for gen1 is applied. Once gen1 policies have been applied, 515 // there may be conflicts in the shape of the heap and this method 516 // is used to make the needed adjustments. The application of the 517 // policies could be more sophisticated (iterative for example) but 518 // keeping it simple also seems a worthwhile goal. 519 bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr, 520 size_t* gen1_size_ptr, 521 const size_t heap_size, 522 const size_t min_gen1_size) { 523 bool result = false; 524 525 if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) { 526 if ((heap_size < (*gen0_size_ptr + min_gen1_size)) && 527 (heap_size >= min_gen1_size + _min_alignment)) { 528 // Adjust gen0 down to accommodate min_gen1_size 529 *gen0_size_ptr = 530 MAX2((uintx)align_size_down(heap_size - min_gen1_size, _min_alignment), 531 _min_alignment); 532 assert(*gen0_size_ptr > 0, "Min gen0 is too large"); 533 result = true; 534 } else { 535 *gen1_size_ptr = 536 MAX2((uintx)align_size_down(heap_size - *gen0_size_ptr, _min_alignment), 537 _min_alignment); 538 } 539 } 540 return result; 541 } 542 543 // Minimum sizes of the generations may be different than 544 // the initial sizes. An inconsistency is permitted here 545 // in the total size that can be specified explicitly by 546 // command line specification of OldSize and NewSize and 547 // also a command line specification of -Xms. Issue a warning 548 // but allow the values to pass. 549 550 void TwoGenerationCollectorPolicy::initialize_size_info() { 551 GenCollectorPolicy::initialize_size_info(); 552 553 // At this point the minimum, initial and maximum sizes 554 // of the overall heap and of gen0 have been determined. 555 // The maximum gen1 size can be determined from the maximum gen0 556 // and maximum heap size since no explicit flags exist 557 // for setting the gen1 maximum. 
558 _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _min_alignment); 559 560 // If no explicit command line flag has been set for the 561 // gen1 size, use what is left for gen1. 562 if (!FLAG_IS_CMDLINE(OldSize)) { 563 // The user has not specified any value but the ergonomics 564 // may have chosen a value (which may or may not be consistent 565 // with the overall heap size). In either case make 566 // the minimum, maximum and initial sizes consistent 567 // with the gen0 sizes and the overall heap sizes. 568 _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _min_alignment); 569 _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _min_alignment); 570 // _max_gen1_size has already been made consistent above 571 FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); 572 } else { 573 // OldSize has been explicitly set on the command line. Use the 574 // OldSize and then determine the consequences. 575 _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size); 576 _initial_gen1_size = OldSize; 577 578 // If the user has explicitly set an OldSize that is inconsistent 579 // with other command line flags, issue a warning. 580 // The generation minimums and the overall heap minimum should 581 // be within one heap alignment. 582 if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) { 583 warning("Inconsistency between minimum heap size and minimum " 584 "generation sizes: using minimum heap = " SIZE_FORMAT, 585 _min_heap_byte_size); 586 } 587 if (OldSize > _max_gen1_size) { 588 warning("Inconsistency between maximum heap size and maximum " 589 "generation sizes: using maximum heap = " SIZE_FORMAT 590 " -XX:OldSize flag is being ignored", 591 _max_heap_byte_size); 592 } 593 // If there is an inconsistency between the OldSize and the minimum and/or 594 // initial size of gen0, since OldSize was explicitly set, OldSize wins. 
595 if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, 596 _min_heap_byte_size, _min_gen1_size)) { 597 if (PrintGCDetails && Verbose) { 598 gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 599 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 600 _min_gen0_size, _initial_gen0_size, _max_gen0_size); 601 } 602 } 603 // The same as above for the old gen initial size 604 if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, 605 _initial_heap_byte_size, _initial_gen1_size)) { 606 if (PrintGCDetails && Verbose) { 607 gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 608 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 609 _min_gen0_size, _initial_gen0_size, _max_gen0_size); 610 } 611 } 612 // update OldSize 613 FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); 614 } 615 616 _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size); 617 618 // Make sure that min gen1 <= initial gen1 <= max gen1 619 _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size); 620 _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size); 621 622 // check 623 assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes"); 624 assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes"); 625 assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes"); 626 assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); 627 assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 628 assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes"); 629 assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes"); 630 assert(_min_heap_byte_size % 
_min_alignment == 0, "min_heap_byte_size alignment"); 631 assert(_initial_heap_byte_size % _min_alignment == 0, "initial_heap_byte_size alignment"); 632 assert(_max_heap_byte_size % _max_alignment == 0, "max_heap_byte_size alignment"); 633 assert(_min_gen0_size % _min_alignment == 0, "_min_gen0_size alignment"); 634 assert(_initial_gen0_size % _min_alignment == 0, "_initial_gen0_size alignment"); 635 assert(_max_gen0_size % _min_alignment == 0, "_max_gen0_size alignment"); 636 assert(_max_gen1_size % _min_alignment == 0, "_max_gen1_size alignment"); 637 638 assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes"); 639 640 if (PrintGCDetails && Verbose) { 641 gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " 642 SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, 643 _min_gen1_size, _initial_gen1_size, _max_gen1_size); 644 } 645 } 646 647 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, 648 bool is_tlab, 649 bool* gc_overhead_limit_was_exceeded) { 650 GenCollectedHeap *gch = GenCollectedHeap::heap(); 651 652 debug_only(gch->check_for_valid_allocation_state()); 653 assert(gch->no_gc_in_progress(), "Allocation during gc not allowed"); 654 655 // In general gc_overhead_limit_was_exceeded should be false so 656 // set it so here and reset it to true only if the gc time 657 // limit is being exceeded as checked below. 658 *gc_overhead_limit_was_exceeded = false; 659 660 HeapWord* result = NULL; 661 662 // Loop until the allocation is satisfied, or unsatisfied after GC. 663 for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { 664 HandleMark hm; // discard any handles allocated in each iteration 665 666 // First allocation attempt is lock-free. 
667 Generation *gen0 = gch->get_gen(0); 668 assert(gen0->supports_inline_contig_alloc(), 669 "Otherwise, must do alloc within heap lock"); 670 if (gen0->should_allocate(size, is_tlab)) { 671 result = gen0->par_allocate(size, is_tlab); 672 if (result != NULL) { 673 assert(gch->is_in_reserved(result), "result not in heap"); 674 return result; 675 } 676 } 677 unsigned int gc_count_before; // read inside the Heap_lock locked region 678 { 679 MutexLocker ml(Heap_lock); 680 if (PrintGC && Verbose) { 681 gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:" 682 " attempting locked slow path allocation"); 683 } 684 // Note that only large objects get a shot at being 685 // allocated in later generations. 686 bool first_only = ! should_try_older_generation_allocation(size); 687 688 result = gch->attempt_allocation(size, is_tlab, first_only); 689 if (result != NULL) { 690 assert(gch->is_in_reserved(result), "result not in heap"); 691 return result; 692 } 693 694 if (GC_locker::is_active_and_needs_gc()) { 695 if (is_tlab) { 696 return NULL; // Caller will retry allocating individual object 697 } 698 if (!gch->is_maximal_no_gc()) { 699 // Try and expand heap to satisfy request 700 result = expand_heap_and_allocate(size, is_tlab); 701 // result could be null if we are out of space 702 if (result != NULL) { 703 return result; 704 } 705 } 706 707 if (gclocker_stalled_count > GCLockerRetryAllocationCount) { 708 return NULL; // we didn't get to do a GC and we didn't get any memory 709 } 710 711 // If this thread is not in a jni critical section, we stall 712 // the requestor until the critical section has cleared and 713 // GC allowed. When the critical section clears, a GC is 714 // initiated by the last thread exiting the critical section; so 715 // we retry the allocation sequence from the beginning of the loop, 716 // rather than causing more, now probably unnecessary, GC attempts. 
717 JavaThread* jthr = JavaThread::current(); 718 if (!jthr->in_critical()) { 719 MutexUnlocker mul(Heap_lock); 720 // Wait for JNI critical section to be exited 721 GC_locker::stall_until_clear(); 722 gclocker_stalled_count += 1; 723 continue; 724 } else { 725 if (CheckJNICalls) { 726 fatal("Possible deadlock due to allocating while" 727 " in jni critical section"); 728 } 729 return NULL; 730 } 731 } 732 733 // Read the gc count while the heap lock is held. 734 gc_count_before = Universe::heap()->total_collections(); 735 } 736 737 VM_GenCollectForAllocation op(size, 738 is_tlab, 739 gc_count_before); 740 VMThread::execute(&op); 741 if (op.prologue_succeeded()) { 742 result = op.result(); 743 if (op.gc_locked()) { 744 assert(result == NULL, "must be NULL if gc_locked() is true"); 745 continue; // retry and/or stall as necessary 746 } 747 748 // Allocation has failed and a collection 749 // has been done. If the gc time limit was exceeded the 750 // this time, return NULL so that an out-of-memory 751 // will be thrown. Clear gc_overhead_limit_exceeded 752 // so that the overhead exceeded does not persist. 753 754 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); 755 const bool softrefs_clear = all_soft_refs_clear(); 756 757 if (limit_exceeded && softrefs_clear) { 758 *gc_overhead_limit_was_exceeded = true; 759 size_policy()->set_gc_overhead_limit_exceeded(false); 760 if (op.result() != NULL) { 761 CollectedHeap::fill_with_object(op.result(), size); 762 } 763 return NULL; 764 } 765 assert(result == NULL || gch->is_in_reserved(result), 766 "result not in heap"); 767 return result; 768 } 769 770 // Give a warning if we seem to be looping forever. 771 if ((QueuedAllocationWarningCount > 0) && 772 (try_count % QueuedAllocationWarningCount == 0)) { 773 warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t" 774 " size=%d %s", try_count, size, is_tlab ? 
"(TLAB)" : ""); 775 } 776 } 777 } 778 779 HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size, 780 bool is_tlab) { 781 GenCollectedHeap *gch = GenCollectedHeap::heap(); 782 HeapWord* result = NULL; 783 for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) { 784 Generation *gen = gch->get_gen(i); 785 if (gen->should_allocate(size, is_tlab)) { 786 result = gen->expand_and_allocate(size, is_tlab); 787 } 788 } 789 assert(result == NULL || gch->is_in_reserved(result), "result not in heap"); 790 return result; 791 } 792 793 HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size, 794 bool is_tlab) { 795 GenCollectedHeap *gch = GenCollectedHeap::heap(); 796 GCCauseSetter x(gch, GCCause::_allocation_failure); 797 HeapWord* result = NULL; 798 799 assert(size != 0, "Precondition violated"); 800 if (GC_locker::is_active_and_needs_gc()) { 801 // GC locker is active; instead of a collection we will attempt 802 // to expand the heap, if there's room for expansion. 803 if (!gch->is_maximal_no_gc()) { 804 result = expand_heap_and_allocate(size, is_tlab); 805 } 806 return result; // could be null if we are out of space 807 } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) { 808 // Do an incremental collection. 809 gch->do_collection(false /* full */, 810 false /* clear_all_soft_refs */, 811 size /* size */, 812 is_tlab /* is_tlab */, 813 number_of_generations() - 1 /* max_level */); 814 } else { 815 if (Verbose && PrintGCDetails) { 816 gclog_or_tty->print(" :: Trying full because partial may fail :: "); 817 } 818 // Try a full collection; see delta for bug id 6266275 819 // for the original code and why this has been simplified 820 // with from-space allocation criteria modified and 821 // such allocation moved out of the safepoint path. 
gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  // A collection (incremental or full) has been done above; retry the
  // allocation across all generations.
  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  // Retry once more after the maximally-compacting, softref-clearing GC.
  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Called on a metadata (Metaspace) allocation failure.  Repeatedly tries
// expand-and-allocate and a GC (via VM_CollectForMetadataAllocation) until
// the allocation succeeds, or a completed GC still leaves no room (returns
// NULL so the caller can report OOM).  Must NOT be called while holding
// the Heap_lock.
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
932 if (op.gc_locked()) { 933 continue; 934 } 935 936 if (op.prologue_succeeded()) { 937 return op.result(); 938 } 939 loop_count++; 940 if ((QueuedAllocationWarningCount > 0) && 941 (loop_count % QueuedAllocationWarningCount == 0)) { 942 warning("satisfy_failed_metadata_allocation() retries %d times \n\t" 943 " size=%d", loop_count, word_size); 944 } 945 } while (true); // Until a GC is done 946 } 947 948 // Return true if any of the following is true: 949 // . the allocation won't fit into the current young gen heap 950 // . gc locker is occupied (jni critical section) 951 // . heap memory is tight -- the most recent previous collection 952 // was a full collection because a partial collection (would 953 // have) failed and is likely to fail again 954 bool GenCollectorPolicy::should_try_older_generation_allocation( 955 size_t word_size) const { 956 GenCollectedHeap* gch = GenCollectedHeap::heap(); 957 size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc(); 958 return (word_size > heap_word_size(gen0_capacity)) 959 || GC_locker::is_active_and_needs_gc() 960 || gch->incremental_collection_failed(); 961 } 962 963 964 // 965 // MarkSweepPolicy methods 966 // 967 968 MarkSweepPolicy::MarkSweepPolicy() { 969 initialize_all(); 970 } 971 972 void MarkSweepPolicy::initialize_generations() { 973 _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL); 974 if (_generations == NULL) { 975 vm_exit_during_initialization("Unable to allocate gen spec"); 976 } 977 978 if (UseParNewGC) { 979 _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size); 980 } else { 981 _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size); 982 } 983 _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size); 984 985 if (_generations[0] == NULL || _generations[1] == NULL) { 986 vm_exit_during_initialization("Unable to 
allocate gen spec"); 987 } 988 } 989 990 void MarkSweepPolicy::initialize_gc_policy_counters() { 991 // initialize the policy counters - 2 collectors, 3 generations 992 if (UseParNewGC) { 993 _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3); 994 } else { 995 _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3); 996 } 997 }