/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.
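
// Alignment contract used throughout this file (a sketch; the concrete
// values are platform dependent): _min_alignment divides _max_alignment
// evenly, sizes within a generation are _min_alignment-aligned, and overall
// heap sizes are _max_alignment-aligned. Illustratively, with a 64K
// Generation::GenGrain and a 2M card-table constraint, generation sizes
// round to 64K while the heap rounds to 2M.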

void CollectorPolicy::initialize_flags() {
  assert(_max_alignment >= _min_alignment,
         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
                 _max_alignment, _min_alignment));
  assert(_max_alignment % _min_alignment == 0,
         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
                 _max_alignment, _min_alignment));

  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms must be aligned
  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
  _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
  _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);

  // Check heap parameter properties
  if (_initial_heap_byte_size < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (_min_heap_byte_size < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (_initial_heap_byte_size <= NewSize) {
    // make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (_max_heap_byte_size < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (_initial_heap_byte_size < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (_max_heap_byte_size < _initial_heap_byte_size) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
  }
}

bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
  return new CardTableRS(whole_heap, max_covered_regions);
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

size_t CollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.

  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
  // is supported.
  // Requirements of any new remembered set implementations must be added here.
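  //
  // In comment form, the constraint above (a sketch assuming the typical
  // 512-byte card size and a 4K os page; neither is asserted here):
  //   max_alignment = card_size * os::vm_page_size();  // 512 * 4096 = 2M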
  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // in presence of large pages we have to make sure that our
    // alignment is large page aware
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > _min_alignment ?
                        align_size_down(x, _min_alignment) :
                        _min_alignment;
  return new_gen_size;
}

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = _min_alignment;
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  _min_alignment = (uintx) Generation::GenGrain;
  _max_alignment = compute_max_alignment();

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, _min_alignment);
  MaxNewSize = align_size_down(MaxNewSize, _min_alignment);

  // Check validity of heap flags
  assert(NewSize % _min_alignment == 0, "eden space alignment");
  assert(MaxNewSize % _min_alignment == 0, "survivor space alignment");

  if (NewSize < 3 * _min_alignment) {
    // make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, _min_alignment);

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
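    //
    // A worked example of the calculation below (illustrative numbers):
    // with -XX:OldSize=1g and the default NewRatio of 2, calculated_heapsize
    // becomes (1G / 2) * (2 + 1) = 1.5G, so OldSize keeps its NewRatio
    // share (2/3) of the total heap.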
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);

  // adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // align
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _max_alignment, and we just made sure that NewSize is aligned to
      // _min_alignment. In initialize_flags() we verified that _max_alignment
      // is a multiple of _min_alignment.
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // need to do this again
  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % _min_alignment == 0, "old space alignment");
  assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choices of parameters are made before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // _min_alignment is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.
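  //
  // A sketch of the ratio-based sizing used below (illustrative numbers,
  // assuming the default NewRatio of 2): for a 96M heap,
  // scale_by_NewRatio_aligned(96M) returns 96M / (2 + 1) = 32M,
  // aligned down to _min_alignment.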

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < _min_alignment) {
      max_new_size = _min_alignment;
    }
    if (MaxNewSize >= _max_heap_byte_size) {
      max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
                                     _min_alignment);
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k).  A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, _min_alignment);
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size.  For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap.  The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap.  This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape.  It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices.  The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (_max_heap_byte_size == _min_heap_byte_size) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    _min_gen0_size = max_new_size;
    _initial_gen0_size = max_new_size;
    _max_gen0_size = max_new_size;
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
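      // For example (illustrative): with -Xms64m and -XX:NewRatio=64,
      // the ratio alone would give gen0 roughly 1M, so the default
      // NewSize supplies the floor instead.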
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    _initial_gen0_size = desired_new_size;
    _max_gen0_size = max_new_size;

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  }
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + _min_alignment)) {
      // Adjust gen0 down to accommodate min_gen1_size
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
    }
  }
  return result;
}

// Minimum sizes of the generations may be different from
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(_min_heap_byte_size > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
    _min_gen1_size =
      MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
    _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
    _initial_gen1_size =
      MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    _min_gen1_size = OldSize;
    _initial_gen1_size = OldSize;

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              _min_heap_byte_size);
    }
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              _max_heap_byte_size);
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          _min_heap_byte_size, OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          _initial_heap_byte_size, OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
  }
  // Enforce the maximum gen1 size.
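  // (Clamp the minimum to the maximum first; the initial size is then
  // pulled into the resulting [min, max] interval below.)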
  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);

  // Check that min gen1 <= initial gen1 <= max gen1
  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
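        // (The number of such stalls is bounded by
        // GCLockerRetryAllocationCount, checked just above.)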
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
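    // (Passing max_level == number_of_generations() - 1 makes every
    // generation, including the oldest, eligible for collection.)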
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.  Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}