/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _predictor(G1ConfidencePercent / 100.0),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _rs_lengths_prediction(0),
  _max_survivor_regions(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0),

  _bytes_allocated_in_old_since_last_gc(0),
  _ihop_control(NULL),
  _initial_mark_to_mixed() {

  // SurvRateGroups below must be initialized after the predictor because they
  // indirectly use it through this object passed to their constructor.
  _short_lived_surv_rate_group =
    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  _survivor_surv_rate_group =
    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
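  // (InitialHeapSize and MaxHeapSize below are thus the raw, possibly
  // unaligned flag values; the heap sizes are aligned to the region size
  // later during heap setup.)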
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
  clear_ratio_check_data();

  _phase_times = new G1GCPhaseTimes(ParallelGCThreads);

  int index = MIN2(ParallelGCThreads - 1, 7u);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _cost_scan_hcc_seq->add(0.0);
  _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
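  // For example, with the default MaxGCPauseMillis of 200 and no explicit
  // GCPauseIntervalMillis, the interval defaults to 201 above and the check
  // below passes; setting both flags to 200 on the command line would fail it.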
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to " UINTX_FORMAT, reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  _cset_chooser = new CollectionSetChooser();
}

G1CollectorPolicy::~G1CollectorPolicy() {
  delete _ihop_control;
}

double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
  return _predictor.get_new_prediction(seq);
}

size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
  return (size_t)get_new_prediction(seq);
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in regions
  // given the number of heap regions depending on the kind of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }

  bool adaptive_young_list_length() const {
    return _adaptive_size;
  }
};


G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (NewSize > MaxNewSize) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
" 386 "A new max generation size of " SIZE_FORMAT "k will be used.", 387 NewSize/K, MaxNewSize/K, NewSize/K); 388 } 389 MaxNewSize = NewSize; 390 } 391 392 if (FLAG_IS_CMDLINE(NewSize)) { 393 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), 394 1U); 395 if (FLAG_IS_CMDLINE(MaxNewSize)) { 396 _max_desired_young_length = 397 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 398 1U); 399 _sizer_kind = SizerMaxAndNewSize; 400 _adaptive_size = _min_desired_young_length == _max_desired_young_length; 401 } else { 402 _sizer_kind = SizerNewSizeOnly; 403 } 404 } else if (FLAG_IS_CMDLINE(MaxNewSize)) { 405 _max_desired_young_length = 406 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 407 1U); 408 _sizer_kind = SizerMaxNewSizeOnly; 409 } 410 } 411 412 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { 413 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100; 414 return MAX2(1U, default_value); 415 } 416 417 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { 418 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100; 419 return MAX2(1U, default_value); 420 } 421 422 void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { 423 assert(number_of_heap_regions > 0, "Heap must be initialized"); 424 425 switch (_sizer_kind) { 426 case SizerDefaults: 427 *min_young_length = calculate_default_min_length(number_of_heap_regions); 428 *max_young_length = calculate_default_max_length(number_of_heap_regions); 429 break; 430 case SizerNewSizeOnly: 431 *max_young_length = calculate_default_max_length(number_of_heap_regions); 432 *max_young_length = MAX2(*min_young_length, *max_young_length); 433 break; 434 case SizerMaxNewSizeOnly: 435 *min_young_length = calculate_default_min_length(number_of_heap_regions); 436 *min_young_length = MIN2(*min_young_length, *max_young_length); 437 break; 438 case SizerMaxAndNewSize: 439 // Do nothing. Values set on the command line, don't update them at runtime. 440 break; 441 case SizerNewRatio: 442 *min_young_length = number_of_heap_regions / (NewRatio + 1); 443 *max_young_length = *min_young_length; 444 break; 445 default: 446 ShouldNotReachHere(); 447 } 448 449 assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); 450 } 451 452 uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { 453 // We need to pass the desired values because recalculation may not update these 454 // values in some cases. 
  uint temp = _min_desired_young_length;
  uint result = _max_desired_young_length;
  recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
  return result;
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
                                   &_max_desired_young_length);
}

void G1CollectorPolicy::post_heap_initialize() {
  uintx max_regions = G1CollectedHeap::heap()->max_regions();
  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
  if (max_young_size != MaxNewSize) {
    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
  }

  _ihop_control = create_ihop_control();
}

void G1CollectorPolicy::initialize_flags() {
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}


void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
  phase_times()->note_gc_start(num_active_workers);
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;

  // When copying, we will likely need more bytes free than is live in the region.
  // Add some safety margin to factor in the confidence of our guess, and the
  // natural expected waste.
  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
  // of the calculation: the lower the confidence, the more headroom.
  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
  // copying due to anticipated waste in the PLABs.
  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);

  if (expected_bytes_to_copy > free_bytes) {
    // end condition 3: out-of-space
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

uint G1CollectorPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(get_new_size_prediction(_rs_lengths_seq));
}

uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.
  uint desired_max_length = calculate_young_list_desired_max_length();

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  result.second = young_list_target_length;

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  result.first = young_list_target_length;
  return result;
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
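  // Illustrative (assumed numbers): with base_min_length == 2 recorded
  // survivors, desired_min_length == 5 and desired_max_length == 20, the
  // search below runs over eden lengths in [3, 18] and base_min_length is
  // added back to whatever it returns.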
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.
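      // Illustrative run (assumed numbers): min == 10 fits, max == 50 does
      // not. The loop probes 30, then either 40 or 20, and so on, halving
      // the gap until max == min + 1, leaving min as the largest length
      // known to fit.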

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(get_new_size_prediction(_rs_lengths_seq));
}

void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        log_error(gc, verify)("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT
void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So, there's no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
  // We skip the marking phase.
  if (!should_continue_with_reclaim) {
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_marking_window(false);
}

double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1CollectorPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1CollectorPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
         average_time_ms(G1GCPhaseTimes::ObjCopy) -
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

bool G1CollectorPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
"Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", 1010 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); 1011 } 1012 1013 return result; 1014 } 1015 1016 // Anything below that is considered to be zero 1017 #define MIN_TIMER_GRANULARITY 0.0000001 1018 1019 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { 1020 double end_time_sec = os::elapsedTime(); 1021 1022 size_t cur_used_bytes = _g1->used(); 1023 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 1024 bool last_pause_included_initial_mark = false; 1025 bool update_stats = !_g1->evacuation_failed(); 1026 1027 NOT_PRODUCT(_short_lived_surv_rate_group->print()); 1028 1029 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 1030 1031 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); 1032 if (last_pause_included_initial_mark) { 1033 record_concurrent_mark_init_end(0.0); 1034 } else { 1035 maybe_start_marking(); 1036 } 1037 1038 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 1039 if (app_time_ms < MIN_TIMER_GRANULARITY) { 1040 // This usually happens due to the timer not having the required 1041 // granularity. Some Linuxes are the usual culprits. 1042 // We'll just set it to something (arbitrarily) small. 1043 app_time_ms = 1.0; 1044 } 1045 1046 if (update_stats) { 1047 // We maintain the invariant that all objects allocated by mutator 1048 // threads will be allocated out of eden regions. So, we can use 1049 // the eden region number allocated since the previous GC to 1050 // calculate the application's allocate rate. The only exception 1051 // to that is humongous objects that are allocated separately. But 1052 // given that humongous object allocations do not really affect 1053 // either the pause's duration nor when the next pause will take 1054 // place we can safely ignore them here. 1055 uint regions_allocated = eden_cset_region_length(); 1056 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 1057 _alloc_rate_ms_seq->add(alloc_rate_ms); 1058 1059 double interval_ms = 1060 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 1061 update_recent_gc_times(end_time_sec, pause_time_ms); 1062 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 1063 if (recent_avg_pause_time_ratio() < 0.0 || 1064 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 1065 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 1066 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 1067 if (_recent_avg_pause_time_ratio < 0.0) { 1068 _recent_avg_pause_time_ratio = 0.0; 1069 } else { 1070 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 1071 _recent_avg_pause_time_ratio = 1.0; 1072 } 1073 } 1074 1075 // Compute the ratio of just this last pause time to the entire time range stored 1076 // in the vectors. Comparing this pause to the entire range, rather than only the 1077 // most recent interval, has the effect of smoothing over a possible transient 'burst' 1078 // of more frequent pauses that don't really reflect a change in heap occupancy. 1079 // This reduces the likelihood of a needless heap expansion being triggered. 
    _last_pause_time_ratio =
      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");

    if (next_gc_should_be_mixed("start mixed GCs",
                                "do not start mixed GCs")) {
      collector_state()->set_gcs_are_young(false);
    } else {
      // We aborted the mixed GC phase early.
      abort_time_to_mixed_tracking();
    }

    collector_state()->set_last_young_gc(false);
  }

  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_gcs_are_young(true);

      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }
    _cost_scan_hcc_seq->add(cost_scan_hcc);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      if (collector_state()->last_gc_was_young()) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (collector_state()->last_gc_was_young()) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      if (collector_state()->in_marking_window()) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    if (young_cset_region_length() > 0) {
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
                                               young_cset_region_length());
    }

    if (old_cset_region_length() > 0) {
      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
                                                   old_cset_region_length());
    }

    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // IHOP control wants to know the expected young gen length if it were not
  // restrained by the heap reserve. Using the actual length would make the
  // prediction too small and thus limit the young gen every time we get to the
  // predicted target occupancy.
  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  update_ihop_prediction(app_time_ms / 1000.0,
                         _bytes_allocated_in_old_since_last_gc,
                         last_unrestrained_young_length * HeapRegion::GrainBytes);
  _bytes_allocated_in_old_since_last_gc = 0;

  _ihop_control->send_trace_event(_g1->gc_tracer_stw());

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
1222 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms", 1223 update_rs_time_goal_ms, scan_hcc_time_ms); 1224 1225 update_rs_time_goal_ms = 0; 1226 } else { 1227 update_rs_time_goal_ms -= scan_hcc_time_ms; 1228 } 1229 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, 1230 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), 1231 update_rs_time_goal_ms); 1232 1233 cset_chooser()->verify(); 1234 } 1235 1236 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { 1237 if (G1UseAdaptiveIHOP) { 1238 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, 1239 G1CollectedHeap::heap()->max_capacity(), 1240 &_predictor, 1241 G1ReservePercent, 1242 G1HeapWastePercent); 1243 } else { 1244 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, 1245 G1CollectedHeap::heap()->max_capacity()); 1246 } 1247 } 1248 1249 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s, 1250 size_t mutator_alloc_bytes, 1251 size_t young_gen_size) { 1252 // Always try to update IHOP prediction. Even evacuation failures give information 1253 // about e.g. whether to start IHOP earlier next time. 1254 1255 // Avoid using really small application times that might create samples with 1256 // very high or very low values. They may be caused by e.g. back-to-back gcs. 1257 double const min_valid_time = 1e-6; 1258 1259 bool report = false; 1260 1261 double marking_to_mixed_time = -1.0; 1262 if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) { 1263 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); 1264 assert(marking_to_mixed_time > 0.0, 1265 "Initial mark to mixed time must be larger than zero but is %.3f", 1266 marking_to_mixed_time); 1267 if (marking_to_mixed_time > min_valid_time) { 1268 _ihop_control->update_marking_length(marking_to_mixed_time); 1269 report = true; 1270 } 1271 } 1272 1273 // As an approximation for the young gc promotion rates during marking we use 1274 // all of them. In many applications there are only a few if any young gcs during 1275 // marking, which makes any prediction useless. This increases the accuracy of the 1276 // prediction. 1277 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { 1278 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); 1279 report = true; 1280 } 1281 1282 if (report) { 1283 report_ihop_statistics(); 1284 } 1285 } 1286 1287 void G1CollectorPolicy::report_ihop_statistics() { 1288 _ihop_control->print(); 1289 } 1290 1291 void G1CollectorPolicy::print_phases() { 1292 phase_times()->print(); 1293 } 1294 1295 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1296 double update_rs_processed_buffers, 1297 double goal_ms) { 1298 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1299 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1300 1301 if (G1UseAdaptiveConcRefinement) { 1302 const int k_gy = 3, k_gr = 6; 1303 const double inc_k = 1.1, dec_k = 0.9; 1304 1305 int g = cg1r->green_zone(); 1306 if (update_rs_time > goal_ms) { 1307 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

size_t G1CollectorPolicy::predict_rs_length_diff() const {
  return get_new_size_prediction(_rs_length_diff_seq);
}

double G1CollectorPolicy::predict_alloc_rate_ms() const {
  return get_new_prediction(_alloc_rate_ms_seq);
}

double G1CollectorPolicy::predict_cost_per_card_ms() const {
  return get_new_prediction(_cost_per_card_ms_seq);
}

double G1CollectorPolicy::predict_scan_hcc_ms() const {
  return get_new_prediction(_cost_scan_hcc_seq);
}

double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}

double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
  return get_new_prediction(_young_cards_per_entry_ratio_seq);
}

double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
    return predict_young_cards_per_entry_ratio();
  } else {
    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
  }
}

size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
}

size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
}

double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
  if (collector_state()->gcs_are_young()) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return predict_mixed_rs_scan_time_ms(card_num);
  }
}

double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
  if (collector_state()->during_concurrent_mark()) {
predict_object_copy_time_ms_during_cm(bytes_to_copy); 1403 } else { 1404 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); 1405 } 1406 } 1407 1408 double G1CollectorPolicy::predict_constant_other_time_ms() const { 1409 return get_new_prediction(_constant_other_time_ms_seq); 1410 } 1411 1412 double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const { 1413 return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); 1414 } 1415 1416 double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const { 1417 return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); 1418 } 1419 1420 double G1CollectorPolicy::predict_remark_time_ms() const { 1421 return get_new_prediction(_concurrent_mark_remark_times_ms); 1422 } 1423 1424 double G1CollectorPolicy::predict_cleanup_time_ms() const { 1425 return get_new_prediction(_concurrent_mark_cleanup_times_ms); 1426 } 1427 1428 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { 1429 TruncatedSeq* seq = surv_rate_group->get_seq(age); 1430 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age); 1431 double pred = get_new_prediction(seq); 1432 if (pred > 1.0) { 1433 pred = 1.0; 1434 } 1435 return pred; 1436 } 1437 1438 double G1CollectorPolicy::predict_yg_surv_rate(int age) const { 1439 return predict_yg_surv_rate(age, _short_lived_surv_rate_group); 1440 } 1441 1442 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const { 1443 return _short_lived_surv_rate_group->accum_surv_rate_pred(age); 1444 } 1445 1446 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1447 size_t scanned_cards) const { 1448 return 1449 predict_rs_update_time_ms(pending_cards) + 1450 predict_rs_scan_time_ms(scanned_cards) + 1451 predict_constant_other_time_ms(); 1452 } 1453 1454 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const { 1455 size_t rs_length = predict_rs_length_diff(); 1456 size_t card_num; 1457 if (collector_state()->gcs_are_young()) { 1458 card_num = predict_young_card_num(rs_length); 1459 } else { 1460 card_num = predict_non_young_card_num(rs_length); 1461 } 1462 return predict_base_elapsed_time_ms(pending_cards, card_num); 1463 } 1464 1465 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const { 1466 size_t bytes_to_copy; 1467 if (hr->is_marked()) { 1468 bytes_to_copy = hr->max_live_bytes(); 1469 } else { 1470 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); 1471 int age = hr->age_in_surv_rate_group(); 1472 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1473 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); 1474 } 1475 return bytes_to_copy; 1476 } 1477 1478 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1479 bool for_young_gc) const { 1480 size_t rs_length = hr->rem_set()->occupied(); 1481 size_t card_num; 1482 1483 // The prediction of the number of cards is based on which type of GC 1484 // we're predicting for.
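// Editor's summary of the cost model assembled below (no new policy, just the
// terms in one place):
//   time(region) = rs_scan(cards(rs_length))
//                + object_copy(bytes_to_copy)
//                + other(region type)
// where cards() applies the young or mixed cards-per-entry ratio.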
1485 if (for_young_gc) { 1486 card_num = predict_young_card_num(rs_length); 1487 } else { 1488 card_num = predict_non_young_card_num(rs_length); 1489 } 1490 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1491 1492 double region_elapsed_time_ms = 1493 predict_rs_scan_time_ms(card_num) + 1494 predict_object_copy_time_ms(bytes_to_copy); 1495 1496 // The prediction of the "other" time for this region is based 1497 // upon the region type and NOT the GC type. 1498 if (hr->is_young()) { 1499 region_elapsed_time_ms += predict_young_other_time_ms(1); 1500 } else { 1501 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1502 } 1503 return region_elapsed_time_ms; 1504 } 1505 1506 void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length, 1507 uint survivor_cset_region_length) { 1508 _eden_cset_region_length = eden_cset_region_length; 1509 _survivor_cset_region_length = survivor_cset_region_length; 1510 _old_cset_region_length = 0; 1511 } 1512 1513 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) { 1514 _recorded_rs_lengths = rs_lengths; 1515 } 1516 1517 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1518 double elapsed_ms) { 1519 _recent_gc_times_ms->add(elapsed_ms); 1520 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1521 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1522 } 1523 1524 void G1CollectorPolicy::clear_ratio_check_data() { 1525 _ratio_over_threshold_count = 0; 1526 _ratio_over_threshold_sum = 0.0; 1527 _pauses_since_start = 0; 1528 } 1529 1530 size_t G1CollectorPolicy::expansion_amount() { 1531 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; 1532 double last_gc_overhead = _last_pause_time_ratio * 100.0; 1533 double threshold = _gc_overhead_perc; 1534 size_t expand_bytes = 0; 1535 1536 // If the heap is at less than half its maximum size, scale the threshold down, 1537 // to a lower limit of 1. Thus the smaller the heap is, the more likely it is to expand, 1538 // though the scaling code will likely keep the increase small. 1539 if (_g1->capacity() <= _g1->max_capacity() / 2) { 1540 threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); 1541 threshold = MAX2(threshold, 1.0); 1542 } 1543 1544 // If the last GC time ratio is over the threshold, increment the count of 1545 // times it has been exceeded, and add this ratio to the sum of exceeded 1546 // ratios. 1547 if (last_gc_overhead > threshold) { 1548 _ratio_over_threshold_count++; 1549 _ratio_over_threshold_sum += last_gc_overhead; 1550 } 1551 1552 // Check if we've had enough GC time ratio checks that were over the 1553 // threshold to trigger an expansion. We'll also expand if we've 1554 // reached the end of the history buffer and the average of all entries 1555 // is still over the threshold. This indicates that fewer, but longer, 1556 // GCs were enough to make the average exceed the threshold.
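// Editor's worked example (assumed numbers): with a threshold of 10% and last
// pause ratios of 12%, 8%, 15%, 11% and 13%, four pauses exceed the threshold;
// if MinOverThresholdForGrowth were 4, the check below would attempt an
// expansion even before the history buffer has filled.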
1557 bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics; 1558 if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || 1559 (filled_history_buffer && (recent_gc_overhead > threshold))) { 1560 size_t min_expand_bytes = HeapRegion::GrainBytes; 1561 size_t reserved_bytes = _g1->max_capacity(); 1562 size_t committed_bytes = _g1->capacity(); 1563 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1564 size_t expand_bytes_via_pct = 1565 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1566 double scale_factor = 1.0; 1567 1568 // If the current size is less than 1/4 of the Initial heap size, expand 1569 // by half of the delta between the current and Initial sizes. I.e., grow 1570 // back quickly. 1571 // 1572 // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of 1573 // the available expansion space, whichever is smaller, as the base 1574 // expansion size. Then possibly scale this size according to how much the 1575 // threshold has (on average) been exceeded by. If the delta is small 1576 // (less than the StartScaleDownAt value), scale the size down linearly, but 1577 // not by less than MinScaleDownFactor. If the delta is large (greater than 1578 // the StartScaleUpAt value), scale up, but by no more than MaxScaleUpFactor 1579 // times the base size. The scaling will be linear in the range from 1580 // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, 1581 // ScaleUpRange sets the rate of scaling up. 1582 if (committed_bytes < InitialHeapSize / 4) { 1583 expand_bytes = (InitialHeapSize - committed_bytes) / 2; 1584 } else { 1585 double const MinScaleDownFactor = 0.2; 1586 double const MaxScaleUpFactor = 2; 1587 double const StartScaleDownAt = _gc_overhead_perc; 1588 double const StartScaleUpAt = _gc_overhead_perc * 1.5; 1589 double const ScaleUpRange = _gc_overhead_perc * 2.0; 1590 1591 double ratio_delta; 1592 if (filled_history_buffer) { 1593 ratio_delta = recent_gc_overhead - threshold; 1594 } else { 1595 ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; 1596 } 1597 1598 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1599 if (ratio_delta < StartScaleDownAt) { 1600 scale_factor = ratio_delta / StartScaleDownAt; 1601 scale_factor = MAX2(scale_factor, MinScaleDownFactor); 1602 } else if (ratio_delta > StartScaleUpAt) { 1603 scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); 1604 scale_factor = MIN2(scale_factor, MaxScaleUpFactor); 1605 } 1606 } 1607 1608 log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " 1609 "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", 1610 recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); 1611 1612 expand_bytes = static_cast<size_t>(expand_bytes * scale_factor); 1613 1614 // Ensure the expansion size is at least the minimum growth amount 1615 // and at most the remaining uncommitted byte size. 1616 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1617 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1618 1619 clear_ratio_check_data(); 1620 } else { 1621 // An expansion was not triggered. If we've started counting, increment 1622 // the number of checks we've made in the current window.
If we've 1623 // reached the end of the window without resizing, clear the counters to 1624 // start again the next time we see a ratio above the threshold. 1625 if (_ratio_over_threshold_count > 0) { 1626 _pauses_since_start++; 1627 if (_pauses_since_start > NumPrevPausesForHeuristics) { 1628 clear_ratio_check_data(); 1629 } 1630 } 1631 } 1632 1633 return expand_bytes; 1634 } 1635 1636 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1637 #ifndef PRODUCT 1638 _short_lived_surv_rate_group->print_surv_rate_summary(); 1639 // add this call for any other surv rate groups 1640 #endif // PRODUCT 1641 } 1642 1643 bool G1CollectorPolicy::is_young_list_full() const { 1644 uint young_list_length = _g1->young_list()->length(); 1645 uint young_list_target_length = _young_list_target_length; 1646 return young_list_length >= young_list_target_length; 1647 } 1648 1649 bool G1CollectorPolicy::can_expand_young_list() const { 1650 uint young_list_length = _g1->young_list()->length(); 1651 uint young_list_max_length = _young_list_max_length; 1652 return young_list_length < young_list_max_length; 1653 } 1654 1655 bool G1CollectorPolicy::adaptive_young_list_length() const { 1656 return _young_gen_sizer->adaptive_young_list_length(); 1657 } 1658 1659 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1660 uint expansion_region_num = 0; 1661 if (GCLockerEdenExpansionPercent > 0) { 1662 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1663 double expansion_region_num_d = perc * (double) _young_list_target_length; 1664 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1665 // less than 1.0) we'll get 1. 1666 expansion_region_num = (uint) ceil(expansion_region_num_d); 1667 } else { 1668 assert(expansion_region_num == 0, "sanity"); 1669 } 1670 _young_list_max_length = _young_list_target_length + expansion_region_num; 1671 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1672 } 1673 1674 // Calculates survivor space parameters. 1675 void G1CollectorPolicy::update_survivors_policy() { 1676 double max_survivor_regions_d = 1677 (double) _young_list_target_length / (double) SurvivorRatio; 1678 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1679 // smaller than 1.0) we'll get 1. 1680 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1681 1682 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1683 HeapRegion::GrainWords * _max_survivor_regions, counters()); 1684 } 1685 1686 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { 1687 // We actually check whether we are marking here and not if we are in a 1688 // reclamation phase. This means that we will schedule a concurrent mark 1689 // even while we are still in the process of reclaiming memory. 1690 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1691 if (!during_cycle) { 1692 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause)); 1693 collector_state()->set_initiate_conc_mark_if_possible(true); 1694 return true; 1695 } else { 1696 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). 
GC cause: %s", GCCause::to_string(gc_cause)); 1697 return false; 1698 } 1699 } 1700 1701 void G1CollectorPolicy::initiate_conc_mark() { 1702 collector_state()->set_during_initial_mark_pause(true); 1703 collector_state()->set_initiate_conc_mark_if_possible(false); 1704 } 1705 1706 void G1CollectorPolicy::decide_on_conc_mark_initiation() { 1707 // We are about to decide on whether this pause will be an 1708 // initial-mark pause. 1709 1710 // First, collector_state()->during_initial_mark_pause() should not be already set. We 1711 // will set it here if we have to. However, it should be cleared by 1712 // the end of the pause (it's only set for the duration of an 1713 // initial-mark pause). 1714 assert(!collector_state()->during_initial_mark_pause(), "pre-condition"); 1715 1716 if (collector_state()->initiate_conc_mark_if_possible()) { 1717 // We had noticed on a previous pause that the heap occupancy has 1718 // gone over the initiating threshold and we should start a 1719 // concurrent marking cycle. So we might initiate one. 1720 1721 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) { 1722 // Initiate a new initial mark if there is no marking or reclamation going on. 1723 initiate_conc_mark(); 1724 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); 1725 } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) { 1726 // Initiate a user requested initial mark. An initial mark must be young only 1727 // GC, so the collector state must be updated to reflect this. 1728 collector_state()->set_gcs_are_young(true); 1729 collector_state()->set_last_young_gc(false); 1730 1731 abort_time_to_mixed_tracking(); 1732 initiate_conc_mark(); 1733 log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)"); 1734 } else { 1735 // The concurrent marking thread is still finishing up the 1736 // previous cycle. If we start one right now the two cycles 1737 // overlap. In particular, the concurrent marking thread might 1738 // be in the process of clearing the next marking bitmap (which 1739 // we will use for the next cycle if we start one). Starting a 1740 // cycle now will be bad given that parts of the marking 1741 // information might get cleared by the marking thread. And we 1742 // cannot wait for the marking thread to finish the cycle as it 1743 // periodically yields while clearing the next marking bitmap 1744 // and, if it's in a yield point, it's waiting for us to 1745 // finish. So, at this point we will not start a cycle and we'll 1746 // let the concurrent marking thread complete the last one. 1747 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)"); 1748 } 1749 } 1750 } 1751 1752 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1753 G1CollectedHeap* _g1h; 1754 CSetChooserParUpdater _cset_updater; 1755 1756 public: 1757 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1758 uint chunk_size) : 1759 _g1h(G1CollectedHeap::heap()), 1760 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1761 1762 bool doHeapRegion(HeapRegion* r) { 1763 // Do we have any marking information for this region? 1764 if (r->is_marked()) { 1765 // We will skip any region that's currently used as an old GC 1766 // alloc region (we should not consider those for collection 1767 // before we fill them up). 
1768 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1769 _cset_updater.add_region(r); 1770 } 1771 } 1772 return false; 1773 } 1774 }; 1775 1776 class ParKnownGarbageTask: public AbstractGangTask { 1777 CollectionSetChooser* _hrSorted; 1778 uint _chunk_size; 1779 G1CollectedHeap* _g1; 1780 HeapRegionClaimer _hrclaimer; 1781 1782 public: 1783 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : 1784 AbstractGangTask("ParKnownGarbageTask"), 1785 _hrSorted(hrSorted), _chunk_size(chunk_size), 1786 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} 1787 1788 void work(uint worker_id) { 1789 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1790 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); 1791 } 1792 }; 1793 1794 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { 1795 assert(n_workers > 0, "Active gc workers should be greater than 0"); 1796 const uint overpartition_factor = 4; 1797 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); 1798 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); 1799 } 1800 1801 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { 1802 cset_chooser()->clear(); 1803 1804 WorkGang* workers = _g1->workers(); 1805 uint n_workers = workers->active_workers(); 1806 1807 uint n_regions = _g1->num_regions(); 1808 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); 1809 cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); 1810 ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers); 1811 workers->run_task(&par_known_garbage_task); 1812 1813 cset_chooser()->sort_regions(); 1814 1815 double end_sec = os::elapsedTime(); 1816 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1817 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1818 _prev_collection_pause_end_ms += elapsed_time_ms; 1819 1820 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); 1821 } 1822 1823 // Add the heap region at the head of the non-incremental collection set 1824 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) { 1825 assert(_inc_cset_build_state == Active, "Precondition"); 1826 assert(hr->is_old(), "the region should be old"); 1827 1828 assert(!hr->in_collection_set(), "should not already be in the CSet"); 1829 _g1->register_old_region_with_cset(hr); 1830 hr->set_next_in_collection_set(_collection_set); 1831 _collection_set = hr; 1832 _collection_set_bytes_used_before += hr->used(); 1833 size_t rs_length = hr->rem_set()->occupied(); 1834 _recorded_rs_lengths += rs_length; 1835 _old_cset_region_length += 1; 1836 } 1837 1838 // Initialize the per-collection-set information 1839 void G1CollectorPolicy::start_incremental_cset_building() { 1840 assert(_inc_cset_build_state == Inactive, "Precondition"); 1841 1842 _inc_cset_head = NULL; 1843 _inc_cset_tail = NULL; 1844 _inc_cset_bytes_used_before = 0; 1845 1846 _inc_cset_recorded_rs_lengths = 0; 1847 _inc_cset_recorded_rs_lengths_diffs = 0; 1848 _inc_cset_predicted_elapsed_time_ms = 0.0; 1849 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1850 _inc_cset_build_state = Active; 1851 } 1852 1853 void G1CollectorPolicy::finalize_incremental_cset_building() { 1854 assert(_inc_cset_build_state == Active, "Precondition"); 1855 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1856 1857 // The two "main" fields, 
_inc_cset_recorded_rs_lengths and 1858 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread 1859 // that adds a new region to the CSet. Further updates by the 1860 // concurrent refinement thread that samples the young RSet lengths 1861 // are accumulated in the *_diffs fields. Here we add the diffs to 1862 // the "main" fields. 1863 1864 if (_inc_cset_recorded_rs_lengths_diffs >= 0) { 1865 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs; 1866 } else { 1867 // This is defensive. The diff should in theory always be positive 1868 // as RSets can only grow between GCs. However, given that we 1869 // sample their size concurrently with other threads updating them 1870 // it's possible that we might get the wrong size back, which 1871 // could make the calculations somewhat inaccurate. 1872 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs); 1873 if (_inc_cset_recorded_rs_lengths >= diffs) { 1874 _inc_cset_recorded_rs_lengths -= diffs; 1875 } else { 1876 _inc_cset_recorded_rs_lengths = 0; 1877 } 1878 } 1879 _inc_cset_predicted_elapsed_time_ms += 1880 _inc_cset_predicted_elapsed_time_ms_diffs; 1881 1882 _inc_cset_recorded_rs_lengths_diffs = 0; 1883 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1884 } 1885 1886 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { 1887 // This routine is used when: 1888 // * adding survivor regions to the incremental cset at the end of an 1889 // evacuation pause, 1890 // * adding the current allocation region to the incremental cset 1891 // when it is retired, and 1892 // * updating existing policy information for a region in the 1893 // incremental cset via young list RSet sampling. 1894 // Therefore this routine may be called at a safepoint by the 1895 // VM thread, or in between safepoints by mutator threads (when 1896 // retiring the current allocation region) or a concurrent 1897 // refine thread (RSet sampling). 1898 1899 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); 1900 size_t used_bytes = hr->used(); 1901 _inc_cset_recorded_rs_lengths += rs_length; 1902 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; 1903 _inc_cset_bytes_used_before += used_bytes; 1904 1905 // Cache the values we have added to the aggregated information 1906 // in the heap region in case we have to remove this region from 1907 // the incremental collection set, or it is updated by the 1908 // RSet sampling code. 1909 hr->set_recorded_rs_length(rs_length); 1910 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); 1911 } 1912 1913 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, 1914 size_t new_rs_length) { 1915 // Update the CSet information that is dependent on the new RS length 1916 assert(hr->is_young(), "Precondition"); 1917 assert(!SafepointSynchronize::is_at_safepoint(), 1918 "should not be at a safepoint"); 1919 1920 // We could have updated _inc_cset_recorded_rs_lengths and 1921 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do 1922 // that atomically, as this code is executed by a concurrent 1923 // refinement thread, potentially concurrently with a mutator thread 1924 // allocating a new region and also updating the same fields. To 1925 // avoid the atomic operations we accumulate these updates on two 1926 // separate fields (*_diffs) and we'll just add them to the "main" 1927 // fields at the start of a GC.
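// Editor's illustration (assumed numbers): if sampling sees a region's RS
// length grow from 10 to 14 entries, the code below adds +4 to
// _inc_cset_recorded_rs_lengths_diffs; finalize_incremental_cset_building()
// folds that +4 into _inc_cset_recorded_rs_lengths at the start of the next GC.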
1928 1929 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length(); 1930 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length; 1931 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff; 1932 1933 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); 1934 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); 1935 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms; 1936 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff; 1937 1938 hr->set_recorded_rs_length(new_rs_length); 1939 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms); 1940 } 1941 1942 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { 1943 assert(hr->is_young(), "invariant"); 1944 assert(hr->young_index_in_cset() > -1, "should have already been set"); 1945 assert(_inc_cset_build_state == Active, "Precondition"); 1946 1947 // We need to clear and then set the cached collection set information 1948 // (the recorded RS length and predicted elapsed time) in the heap region 1949 // here, before the region gets added to the collection set. An individual 1950 // heap region's cached values are calculated, aggregated with the policy 1951 // collection set info, and cached in the heap region here (initially) and 1952 // (subsequently) updated by the Young List sampling code. 1953 1954 size_t rs_length = hr->rem_set()->occupied(); 1955 add_to_incremental_cset_info(hr, rs_length); 1956 1957 assert(!hr->in_collection_set(), "invariant"); 1958 _g1->register_young_region_with_cset(hr); 1959 assert(hr->next_in_collection_set() == NULL, "invariant"); 1960 } 1961 1962 // Add the region at the RHS of the incremental cset 1963 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { 1964 // We should only ever be appending survivors at the end of a pause 1965 assert(hr->is_survivor(), "Logic"); 1966 1967 // Do the 'common' stuff 1968 add_region_to_incremental_cset_common(hr); 1969 1970 // Now add the region at the right hand side 1971 if (_inc_cset_tail == NULL) { 1972 assert(_inc_cset_head == NULL, "invariant"); 1973 _inc_cset_head = hr; 1974 } else { 1975 _inc_cset_tail->set_next_in_collection_set(hr); 1976 } 1977 _inc_cset_tail = hr; 1978 } 1979 1980 // Add the region to the LHS of the incremental cset 1981 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { 1982 // Survivors should be added to the RHS at the end of a pause 1983 assert(hr->is_eden(), "Logic"); 1984 1985 // Do the 'common' stuff 1986 add_region_to_incremental_cset_common(hr); 1987 1988 // Add the region at the left hand side 1989 hr->set_next_in_collection_set(_inc_cset_head); 1990 if (_inc_cset_head == NULL) { 1991 assert(_inc_cset_tail == NULL, "Invariant"); 1992 _inc_cset_tail = hr; 1993 } 1994 _inc_cset_head = hr; 1995 } 1996 1997 #ifndef PRODUCT 1998 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { 1999 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); 2000 2001 st->print_cr("\nCollection_set:"); 2002 HeapRegion* csr = list_head; 2003 while (csr != NULL) { 2004 HeapRegion* next = csr->next_in_collection_set(); 2005 assert(csr->in_collection_set(), "bad CS"); 2006 st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d", 2007 HR_FORMAT_PARAMS(csr), 2008 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()), 2009 csr->age_in_surv_rate_group_cond()); 2010 csr = next; 2011 } 2012 } 2013 #endif // !PRODUCT 2014 2015 double
G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const { 2016 // Returns the given amount of reclaimable bytes (that represents 2017 // the amount of reclaimable space still to be collected) as a 2018 // percentage of the current heap capacity. 2019 size_t capacity_bytes = _g1->capacity(); 2020 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 2021 } 2022 2023 void G1CollectorPolicy::maybe_start_marking() { 2024 if (need_to_start_conc_mark("end of GC")) { 2025 // Note: this might have already been set, if during the last 2026 // pause we decided to start a cycle but at the beginning of 2027 // this pause we decided to postpone it. That's OK. 2028 collector_state()->set_initiate_conc_mark_if_possible(true); 2029 } 2030 } 2031 2032 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const { 2033 assert(!collector_state()->full_collection(), "must be"); 2034 if (collector_state()->during_initial_mark_pause()) { 2035 assert(collector_state()->last_gc_was_young(), "must be"); 2036 assert(!collector_state()->last_young_gc(), "must be"); 2037 return InitialMarkGC; 2038 } else if (collector_state()->last_young_gc()) { 2039 assert(!collector_state()->during_initial_mark_pause(), "must be"); 2040 assert(collector_state()->last_gc_was_young(), "must be"); 2041 return LastYoungGC; 2042 } else if (!collector_state()->last_gc_was_young()) { 2043 assert(!collector_state()->during_initial_mark_pause(), "must be"); 2044 assert(!collector_state()->last_young_gc(), "must be"); 2045 return MixedGC; 2046 } else { 2047 assert(collector_state()->last_gc_was_young(), "must be"); 2048 assert(!collector_state()->during_initial_mark_pause(), "must be"); 2049 assert(!collector_state()->last_young_gc(), "must be"); 2050 return YoungOnlyGC; 2051 } 2052 } 2053 2054 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) { 2055 // Manage the MMU tracker. For some reason it ignores Full GCs. 2056 if (kind != FullGC) { 2057 _mmu_tracker->add_pause(start, end); 2058 } 2059 // Manage the mutator time tracking from initial mark to first mixed gc. 2060 switch (kind) { 2061 case FullGC: 2062 abort_time_to_mixed_tracking(); 2063 break; 2064 case Cleanup: 2065 case Remark: 2066 case YoungOnlyGC: 2067 case LastYoungGC: 2068 _initial_mark_to_mixed.add_pause(end - start); 2069 break; 2070 case InitialMarkGC: 2071 _initial_mark_to_mixed.record_initial_mark_end(end); 2072 break; 2073 case MixedGC: 2074 _initial_mark_to_mixed.record_mixed_gc_start(start); 2075 break; 2076 default: 2077 ShouldNotReachHere(); 2078 } 2079 } 2080 2081 void G1CollectorPolicy::abort_time_to_mixed_tracking() { 2082 _initial_mark_to_mixed.reset(); 2083 } 2084 2085 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 2086 const char* false_action_str) const { 2087 if (cset_chooser()->is_empty()) { 2088 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); 2089 return false; 2090 } 2091 2092 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 2093 size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); 2094 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 2095 double threshold = (double) G1HeapWastePercent; 2096 if (reclaimable_perc <= threshold) { 2097 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). 
candidate old regions: %u reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%", 2098 false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 2099 return false; 2100 } 2101 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%", 2102 true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 2103 return true; 2104 } 2105 2106 uint G1CollectorPolicy::calc_min_old_cset_length() const { 2107 // The min old CSet region bound is based on the maximum desired 2108 // number of mixed GCs after a cycle. I.e., even if some old regions 2109 // look expensive, we should add them to the CSet anyway to make 2110 // sure we go through the available old regions in no more than the 2111 // maximum desired number of mixed GCs. 2112 // 2113 // The calculation is based on the number of marked regions we added 2114 // to the CSet chooser in the first place, not how many remain, so 2115 // that the result is the same during all mixed GCs that follow a cycle. 2116 2117 const size_t region_num = (size_t) cset_chooser()->length(); 2118 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 2119 size_t result = region_num / gc_num; 2120 // emulate ceiling 2121 if (result * gc_num < region_num) { 2122 result += 1; 2123 } 2124 return (uint) result; 2125 } 2126 2127 uint G1CollectorPolicy::calc_max_old_cset_length() const { 2128 // The max old CSet region bound is based on the threshold expressed 2129 // as a percentage of the heap size. I.e., it should bound the 2130 // number of old regions added to the CSet irrespective of how many 2131 // of them are available. 2132 2133 const G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2134 const size_t region_num = g1h->num_regions(); 2135 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 2136 size_t result = region_num * perc / 100; 2137 // emulate ceiling 2138 if (100 * result < region_num * perc) { 2139 result += 1; 2140 } 2141 return (uint) result; 2142 } 2143 2144 2145 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) { 2146 double young_start_time_sec = os::elapsedTime(); 2147 2148 YoungList* young_list = _g1->young_list(); 2149 finalize_incremental_cset_building(); 2150 2151 guarantee(target_pause_time_ms > 0.0, 2152 "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms); 2153 guarantee(_collection_set == NULL, "Precondition"); 2154 2155 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); 2156 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); 2157 2158 log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms", 2159 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms); 2160 2161 collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young()); 2162 2163 // The young list is laid out so that the survivor regions from the previous 2164 // pause are appended to the RHS of the young list, i.e. 2165 // [Newly Young Regions ++ Survivors from last pause].
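// Editor's budget sketch (assumed numbers): with a 200ms pause target and a
// predicted base time of 50ms, time_remaining_ms starts at 150ms; the predicted
// time of the mandatory young regions is subtracted below, and only the
// remainder is offered to finalize_old_cset_part() for old regions.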
2166 2167 uint survivor_region_length = young_list->survivor_length(); 2168 uint eden_region_length = young_list->eden_length(); 2169 init_cset_region_lengths(eden_region_length, survivor_region_length); 2170 2171 HeapRegion* hr = young_list->first_survivor_region(); 2172 while (hr != NULL) { 2173 assert(hr->is_survivor(), "badly formed young list"); 2174 // There is a convention that all the young regions in the CSet 2175 // are tagged as "eden", so we do this for the survivors here. We 2176 // use the special set_eden_pre_gc() as it doesn't check that the 2177 // region is free (which is not the case here). 2178 hr->set_eden_pre_gc(); 2179 hr = hr->get_next_young_region(); 2180 } 2181 2182 // Clear the fields that point to the survivor list - they are all young now. 2183 young_list->clear_survivors(); 2184 2185 _collection_set = _inc_cset_head; 2186 _collection_set_bytes_used_before = _inc_cset_bytes_used_before; 2187 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0); 2188 2189 log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms", 2190 eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms); 2191 2192 // The number of recorded young regions is the incremental 2193 // collection set's current size 2194 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); 2195 2196 double young_end_time_sec = os::elapsedTime(); 2197 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0); 2198 2199 return time_remaining_ms; 2200 } 2201 2202 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) { 2203 double non_young_start_time_sec = os::elapsedTime(); 2204 double predicted_old_time_ms = 0.0; 2205 2206 2207 if (!collector_state()->gcs_are_young()) { 2208 cset_chooser()->verify(); 2209 const uint min_old_cset_length = calc_min_old_cset_length(); 2210 const uint max_old_cset_length = calc_max_old_cset_length(); 2211 2212 uint expensive_region_num = 0; 2213 bool check_time_remaining = adaptive_young_list_length(); 2214 2215 HeapRegion* hr = cset_chooser()->peek(); 2216 while (hr != NULL) { 2217 if (old_cset_region_length() >= max_old_cset_length) { 2218 // Added maximum number of old regions to the CSet. 2219 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions", 2220 old_cset_region_length(), max_old_cset_length); 2221 break; 2222 } 2223 2224 2225 // Stop adding regions if the remaining reclaimable space is 2226 // not above G1HeapWastePercent. 2227 size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); 2228 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 2229 double threshold = (double) G1HeapWastePercent; 2230 if (reclaimable_perc <= threshold) { 2231 // We've added enough old regions that the amount of uncollected 2232 // reclaimable space is at or below the waste threshold. Stop 2233 // adding old regions to the CSet. 2234 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). 
" 2235 "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%", 2236 old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 2237 break; 2238 } 2239 2240 double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); 2241 if (check_time_remaining) { 2242 if (predicted_time_ms > time_remaining_ms) { 2243 // Too expensive for the current CSet. 2244 2245 if (old_cset_region_length() >= min_old_cset_length) { 2246 // We have added the minimum number of old regions to the CSet, 2247 // we are done with this CSet. 2248 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). " 2249 "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions", 2250 predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length); 2251 break; 2252 } 2253 2254 // We'll add it anyway given that we haven't reached the 2255 // minimum number of old regions. 2256 expensive_region_num += 1; 2257 } 2258 } else { 2259 if (old_cset_region_length() >= min_old_cset_length) { 2260 // In the non-auto-tuning case, we'll finish adding regions 2261 // to the CSet if we reach the minimum. 2262 2263 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions", 2264 old_cset_region_length(), min_old_cset_length); 2265 break; 2266 } 2267 } 2268 2269 // We will add this region to the CSet. 2270 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); 2271 predicted_old_time_ms += predicted_time_ms; 2272 cset_chooser()->pop(); // already have region via peek() 2273 _g1->old_set_remove(hr); 2274 add_old_region_to_cset(hr); 2275 2276 hr = cset_chooser()->peek(); 2277 } 2278 if (hr == NULL) { 2279 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)"); 2280 } 2281 2282 if (expensive_region_num > 0) { 2283 // We print the information once here at the end, predicated on 2284 // whether we added any apparently expensive regions or not, to 2285 // avoid generating output per region. 2286 log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)." 2287 "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms", 2288 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms); 2289 } 2290 2291 cset_chooser()->verify(); 2292 } 2293 2294 stop_incremental_cset_building(); 2295 2296 log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f", 2297 old_cset_region_length(), predicted_old_time_ms, time_remaining_ms); 2298 2299 double non_young_end_time_sec = os::elapsedTime(); 2300 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); 2301 }