/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

// Different defaults for different numbers of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _predictor(G1ConfidencePercent / 100.0),
  _parallel_gc_threads(ParallelGCThreads),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _rs_lengths_prediction(0),
  _max_survivor_regions(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0),

  _bytes_allocated_in_old_since_last_gc(0),
  _ihop_control(NULL),
  _initial_mark_to_mixed() {

  // SurvRateGroups below must be initialized after the predictor because they
  // indirectly use it through this object passed to their constructor.
  _short_lived_surv_rate_group =
    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  _survivor_surv_rate_group =
    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
  clear_ratio_check_data();

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _cost_scan_hcc_seq->add(0.0);
  _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]);
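
  // For example, a run with -XX:ParallelGCThreads=4 uses index 3 above, so the
  // cost-per-card sequence is seeded with 0.003 ms and the cost-per-entry
  // sequence with 0.008 ms; eight or more threads use index 7.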

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }
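
  // A sketch of the resulting settings under the rules above:
  //   no flags set                       -> MaxGCPauseMillis = 200, GCPauseIntervalMillis = 201
  //   -XX:MaxGCPauseMillis=50 only       -> GCPauseIntervalMillis = 51
  //   -XX:GCPauseIntervalMillis=100 only -> the VM exits during initialization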

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
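  // For example, GCTimeRatio=9 yields a target GC overhead of
  // 100 * (1 / (1 + 9)) = 10%; larger ratios shrink the allowed overhead.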

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to " UINTX_FORMAT, reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  _cset_chooser = new CollectionSetChooser();
}

G1CollectorPolicy::~G1CollectorPolicy() {
  delete _ihop_control;
}

double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
  return _predictor.get_new_prediction(seq);
}

size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
  return (size_t)get_new_prediction(seq);
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

void G1CollectorPolicy::initialize_flags() {
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::post_heap_initialize() {
  uintx max_regions = G1CollectedHeap::heap()->max_regions();
  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
  if (max_young_size != MaxNewSize) {
    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
  }

  _ihop_control = create_ihop_control();
}

G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (NewSize > MaxNewSize) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    MaxNewSize = NewSize;
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      // Adaptive sizing only makes sense when the two bounds actually differ.
      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}
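
// For example, assuming a heap of 1024 regions and the default
// G1NewSizePercent=5 and G1MaxNewSizePercent=60, the default young gen bounds
// come out as min = 1024 * 5 / 100 = 51 regions and
// max = 1024 * 60 / 100 = 614 regions.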

void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      *max_young_length = MAX2(*min_young_length, *max_young_length);
      break;
    case SizerMaxNewSizeOnly:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *min_young_length = MIN2(*min_young_length, *max_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      *min_young_length = number_of_heap_regions / (NewRatio + 1);
      *max_young_length = *min_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
}

uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
  // We need to pass the desired values because recalculation may not update these
  // values in some cases.
  uint temp = _min_desired_young_length;
  uint result = _max_desired_young_length;
  recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
  return result;
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
          &_max_desired_young_length);
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
  phase_times()->note_gc_start(num_active_workers);
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;

  // When copying, we will likely need more bytes free than is live in the region.
  // Add some safety margin to factor in the confidence of our guess, and the
  // natural expected waste.
  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
  // of the calculation: the lower the confidence, the more headroom.
  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
  // copying due to anticipated waste in the PLABs.
  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
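  // Worked example (assuming the defaults G1ConfidencePercent=50 and
  // TargetPLABWastePct=10): safety_factor = (100.0 / 50) * 110 / 100.0 = 2.2,
  // i.e. we want roughly 2.2x the predicted live bytes available as free space.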
  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);

  if (expected_bytes_to_copy > free_bytes) {
    // end condition 3: out-of-space
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                  uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

uint G1CollectorPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(get_new_size_prediction(_rs_lengths_seq));
}

uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.

  uint desired_max_length = calculate_young_list_desired_max_length();

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  result.second = young_list_target_length;
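  // Note: result.second keeps the value before the reserve and min/max
  // clamping below; update_young_list_target_length() returns it, and
  // record_collection_pause_end() feeds it to the IHOP control as the
  // "unrestrained" young gen length.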

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  result.first = young_list_target_length;
  return result;
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.
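      //
      // For example, starting from min=10 (fits) and max=100 (does not fit),
      // the loop probes 55, then either 77 or 32, and so on, halving the gap
      // each time until diff == 0 and min holds the largest fitting length.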

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
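    // For example, a sampled length of 10000 entries is padded to 11000, so
    // small further growth does not force another recalculation.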
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(get_new_size_prediction(_rs_lengths_seq));
}

void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}


#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        log_info(gc, verify)("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
  // We skip the marking phase.
  if (!should_continue_with_reclaim) {
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_marking_window(false);
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_young_gen_time_data.record_yield_time(yield_ms);
  }
}

double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1CollectorPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1CollectorPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
         average_time_ms(G1GCPhaseTimes::ObjCopy) -
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

bool G1CollectorPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)"
                                     : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
  }

  return result;
}
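
// For example (a sketch assuming the static IHOP control with the default
// InitiatingHeapOccupancyPercent=45 and a 1 GB heap): once non-young occupancy
// plus the pending allocation exceeds ~460 MB, a young GC that is not about to
// enter the mixed phase will request initiation of a concurrent cycle.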

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
  double end_time_sec = os::elapsedTime();

  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

  NOT_PRODUCT(_short_lived_surv_rate_group->print());

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
  }

  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    app_time_ms = 1.0;
  }

  if (update_stats) {
    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the number of eden regions allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }

    // Compute the ratio of just this last pause time to the entire time range stored
    // in the vectors. Comparing this pause to the entire range, rather than only the
    // most recent interval, has the effect of smoothing over a possible transient 'burst'
    // of more frequent pauses that don't really reflect a change in heap occupancy.
    // This reduces the likelihood of a needless heap expansion being triggered.
    _last_pause_time_ratio =
      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
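    // For example, a 50 ms pause with 10 recorded pause end times spanning a
    // 1000 ms interval gives a ratio of 0.5, i.e. as if every pause in the
    // window had been this long.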
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");

    if (next_gc_should_be_mixed("start mixed GCs",
                                "do not start mixed GCs")) {
      collector_state()->set_gcs_are_young(false);
    } else {
      // We aborted the mixed GC phase early.
      abort_time_to_mixed_tracking();
    }

    collector_state()->set_last_young_gc(false);
  }

  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_gcs_are_young(true);

      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }
    _cost_scan_hcc_seq->add(cost_scan_hcc);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      if (collector_state()->last_gc_was_young()) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (collector_state()->last_gc_was_young()) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      if (collector_state()->in_marking_window()) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    if (young_cset_region_length() > 0) {
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
                                               young_cset_region_length());
    }

    if (old_cset_region_length() > 0) {
      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
                                                   old_cset_region_length());
    }

    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // IHOP control wants to know the expected young gen length if it were not
  // restrained by the heap reserve. Using the actual length would make the
  // prediction too small, limiting the young gen every time we reach the
  // predicted target occupancy.
  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  update_ihop_prediction(app_time_ms / 1000.0,
                         _bytes_allocated_in_old_since_last_gc,
                         last_unrestrained_young_length * HeapRegion::GrainBytes);
  _bytes_allocated_in_old_since_last_gc = 0;

  _ihop_control->send_trace_event(_g1->gc_tracer_stw());

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  // e.g. a 200 ms pause target with G1RSetUpdatingPauseTimePercent=10 budgets 20 ms here.

  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
                                update_rs_time_goal_ms, scan_hcc_time_ms);

    update_rs_time_goal_ms = 0;
  } else {
    update_rs_time_goal_ms -= scan_hcc_time_ms;
  }
  adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                               update_rs_time_goal_ms);

  cset_chooser()->verify();
}

G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     G1CollectedHeap::heap()->max_capacity(),
                                     &_predictor,
                                     G1ReservePercent,
                                     G1HeapWastePercent);
  } else {
    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
                                   G1CollectedHeap::heap()->max_capacity());
  }
}

void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
                                               size_t mutator_alloc_bytes,
                                               size_t young_gen_size) {
  // Always try to update IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

  // Avoid using really small application times that might create samples with
  // very high or very low values. They may be caused by e.g. back-to-back gcs.
  double const min_valid_time = 1e-6;

  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Initial mark to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
      report = true;
    }
  }

  // As an approximation for the young gc promotion rates during marking we use
  // all of them. In many applications there are only a few if any young gcs during
  // marking, which makes any prediction useless. This increases the accuracy of the
  // prediction.
  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}

void G1CollectorPolicy::report_ihop_statistics() {
  _ihop_control->print();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::print_phases() {
  phase_times()->print();
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
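    // For example, a green zone of 100 buffers yields yellow = 300 and
    // red = 600. When Update RS overran its goal, green shrinks by 10%
    // (dec_k); otherwise, if enough buffers were processed, it grows by 10%
    // (inc_k) or by at least one buffer.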
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

size_t G1CollectorPolicy::predict_rs_length_diff() const {
  return get_new_size_prediction(_rs_length_diff_seq);
}

double G1CollectorPolicy::predict_alloc_rate_ms() const {
  return get_new_prediction(_alloc_rate_ms_seq);
}

double G1CollectorPolicy::predict_cost_per_card_ms() const {
  return get_new_prediction(_cost_per_card_ms_seq);
}

double G1CollectorPolicy::predict_scan_hcc_ms() const {
  return get_new_prediction(_cost_scan_hcc_seq);
}

double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}

double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
  return get_new_prediction(_young_cards_per_entry_ratio_seq);
}

double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
    return predict_young_cards_per_entry_ratio();
  } else {
    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
  }
}

size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
}

size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
}

double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
  if (collector_state()->gcs_are_young()) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return predict_mixed_rs_scan_time_ms(card_num);
  }
}

double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
  if (collector_state()->during_concurrent_mark()) {
    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
  }
}

double G1CollectorPolicy::predict_constant_other_time_ms() const {
  return get_new_prediction(_constant_other_time_ms_seq);
}

double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_remark_time_ms() const {
  return get_new_prediction(_concurrent_mark_remark_times_ms);
}

double G1CollectorPolicy::predict_cleanup_time_ms() const {
  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}

double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = get_new_prediction(seq);
  if (pred > 1.0) {
    pred = 1.0;
  }
  return pred;
}

double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
}

double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                       size_t scanned_cards) const {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (collector_state()->gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
  else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}
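
// For example, a young region with 8 MB used and a predicted survival rate of
// 0.25 for its age is expected to contribute about 2 MB of copying, while a
// marked old region is costed at its maximum live bytes.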
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                                 uint survivor_cset_region_length) {
  _eden_cset_region_length = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

void G1CollectorPolicy::clear_ratio_check_data() {
  _ratio_over_threshold_count = 0;
  _ratio_over_threshold_sum = 0.0;
  _pauses_since_start = 0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _last_pause_time_ratio * 100.0;
  double threshold = _gc_overhead_perc;
  size_t expand_bytes = 0;

  // If the heap is at less than half its maximum size, scale the threshold
  // down (but not below 1). Thus the smaller the heap is, the more likely it
  // is to expand, though the scaling code will likely keep the increase small.
  if (_g1->capacity() <= _g1->max_capacity() / 2) {
    threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
    threshold = MAX2(threshold, 1.0);
  }

  // If the last GC time ratio is over the threshold, increment the count of
  // times it has been exceeded, and add this ratio to the sum of exceeded
  // ratios.
  if (last_gc_overhead > threshold) {
    _ratio_over_threshold_count++;
    _ratio_over_threshold_sum += last_gc_overhead;
  }

  // Check if we've had enough GC time ratio checks that were over the
  // threshold to trigger an expansion. We'll also expand if we've
  // reached the end of the history buffer and the average of all entries
  // is still over the threshold. This indicates that a smaller number of
  // GCs were long enough to make the average exceed the threshold.
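  // Illustrative example (assumed values, with MinOverThresholdForGrowth
  // taken to be 4): four pauses whose individual GC time ratio exceeds the
  // (possibly scaled-down) threshold trigger an expansion; alternatively,
  // once NumPrevPausesForHeuristics pauses have been observed, an average
  // overhead above the threshold triggers one even if fewer individual
  // pauses exceeded it.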
  bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
      (filled_history_buffer && (recent_gc_overhead > threshold))) {
    size_t min_expand_bytes = HeapRegion::GrainBytes;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    double scale_factor = 1.0;

    // If the current size is less than 1/4 of the Initial heap size, expand
    // by half of the delta between the current and Initial sizes. I.e., grow
    // back quickly.
    //
    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
    // the available expansion space, whichever is smaller, as the base
    // expansion size. Then possibly scale this size according to how much the
    // threshold has (on average) been exceeded by. If the delta is small
    // (less than the StartScaleDownAt value), scale the size down linearly, but
    // not by less than MinScaleDownFactor. If the delta is large (greater than
    // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
    // times the base size. The scaling will be linear in the range from
    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
    // ScaleUpRange sets the rate of scaling up.
    if (committed_bytes < InitialHeapSize / 4) {
      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
    } else {
      double const MinScaleDownFactor = 0.2;
      double const MaxScaleUpFactor = 2;
      double const StartScaleDownAt = _gc_overhead_perc;
      double const StartScaleUpAt = _gc_overhead_perc * 1.5;
      double const ScaleUpRange = _gc_overhead_perc * 2.0;

      double ratio_delta;
      if (filled_history_buffer) {
        ratio_delta = recent_gc_overhead - threshold;
      } else {
        ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
      }

      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
      if (ratio_delta < StartScaleDownAt) {
        scale_factor = ratio_delta / StartScaleDownAt;
        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
      } else if (ratio_delta > StartScaleUpAt) {
        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
      }
    }

    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

    // Ensure the expansion size is at least the minimum growth amount
    // and at most the remaining uncommitted byte size.
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    clear_ratio_check_data();
  } else {
    // An expansion was not triggered. If we've started counting, increment
    // the number of checks we've made in the current window. If we've
    // reached the end of the window without resizing, clear the counters to
    // start again the next time we see a ratio above the threshold.
    if (_ratio_over_threshold_count > 0) {
      _pauses_since_start++;
      if (_pauses_since_start > NumPrevPausesForHeuristics) {
        clear_ratio_check_data();
      }
    }
  }

  return expand_bytes;
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_young_gen_time_data.print();
  _trace_old_gen_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

bool G1CollectorPolicy::is_young_list_full() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length >= young_list_target_length;
}

bool G1CollectorPolicy::can_expand_young_list() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
    (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
      HeapRegion::GrainWords * _max_survivor_regions, counters());
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    return false;
  }
}

void G1CollectorPolicy::initiate_conc_mark() {
  collector_state()->set_during_initial_mark_pause(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->during_initial_mark_pause() should not already
  // be set. We will set it here if we have to. However, it should be cleared
  // by the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
      // Initiate a user requested initial mark. An initial mark must be a
      // young-only GC, so the collector state must be updated to reflect this.
      collector_state()->set_gcs_are_young(true);
      collector_state()->set_last_young_gc(false);

      abort_time_to_mixed_tracking();
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}

class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool doHeapRegion(HeapRegion* r) {
    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _cset_updater.add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1;
  HeapRegionClaimer _hrclaimer;

public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
    _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
  }
};

uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
  cset_chooser()->clear();

  WorkGang* workers = _g1->workers();
  uint n_workers = workers->active_workers();

  uint n_regions = _g1->num_regions();
  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
  ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  cset_chooser()->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  _g1->register_old_region_with_cset(hr);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_bytes_used_before += hr->used();
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_cset_region_length += 1;
}

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms = 0.0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_cset_build_state = Active;
}

void G1CollectorPolicy::finalize_incremental_cset_building() {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should in theory always be positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
    if (_inc_cset_recorded_rs_lengths >= diffs) {
      _inc_cset_recorded_rs_lengths -= diffs;
    } else {
      _inc_cset_recorded_rs_lengths = 0;
    }
  }
  _inc_cset_predicted_elapsed_time_ms +=
    _inc_cset_predicted_elapsed_time_ms_diffs;

  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}

void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  size_t used_bytes = hr->used();
  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // RSet sampling code.
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}

void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(),
         "should not be at a safepoint");

  // We could have updated _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.
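  // Illustrative example (assumed values): if this region was recorded with
  // an RSet length of 100 and sampling now observes new_rs_length = 120, a
  // diff of 120 - 100 = 20 is accumulated below; the "main"
  // _inc_cset_recorded_rs_lengths field is only adjusted with this diff in
  // finalize_incremental_cset_building(), at the start of the next GC.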
  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  _g1->register_young_region_with_cset(hr);
  assert(hr->next_in_collection_set() == NULL, "invariant");
}

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(hr->is_eden(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}

#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                 HR_FORMAT_PARAMS(csr),
                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
                 csr->age_in_surv_rate_group_cond());
    csr = next;
  }
}
#endif // !PRODUCT

double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  // Returns the given amount of reclaimable bytes (that represents
  // the amount of reclaimable space still to be collected) as a
  // percentage of the current heap capacity.
  size_t capacity_bytes = _g1->capacity();
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}

void G1CollectorPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
  assert(!collector_state()->full_collection(), "must be");
  if (collector_state()->during_initial_mark_pause()) {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return InitialMarkGC;
  } else if (collector_state()->last_young_gc()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(collector_state()->last_gc_was_young(), "must be");
    return LastYoungGC;
  } else if (!collector_state()->last_gc_was_young()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return MixedGC;
  } else {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return YoungOnlyGC;
  }
}

void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed gc.
  switch (kind) {
    case FullGC:
      abort_time_to_mixed_tracking();
      break;
    case Cleanup:
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      _initial_mark_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
      _initial_mark_to_mixed.record_initial_mark_end(end);
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}

void G1CollectorPolicy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}

bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) const {
  if (cset_chooser()->is_empty()) {
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
  return true;
}

uint G1CollectorPolicy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) cset_chooser()->length();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1CollectorPolicy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}

double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                            _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());

  if (collector_state()->last_gc_was_young()) {
    _trace_young_gen_time_data.increment_young_collection_count();
  } else {
    _trace_young_gen_time_data.increment_mixed_collection_count();
  }

  // The young list is laid out with the survivor regions from the previous
  // pause appended to its RHS, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].
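  // Illustrative layout (assumed counts): with 8 newly allocated eden regions
  // and 2 survivors retained from the previous pause, the young list is
  //   [E0, ..., E7, S0, S1]
  // giving eden_region_length = 8 and survivor_region_length = 2 below.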
  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->eden_length();
  init_cset_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    // There is a convention that all the young regions in the CSet
    // are tagged as "eden", so we do this for the survivors here. We
    // use the special set_eden_pre_gc() as it doesn't check that the
    // region is free (which is not the case here).
    hr->set_eden_pre_gc();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
                            eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size.
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}

void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;

  if (!collector_state()->gcs_are_young()) {
    cset_chooser()->verify();
    const uint min_old_cset_length = calc_min_old_cset_length();
    const uint max_old_cset_length = calc_max_old_cset_length();

    uint expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();

    HeapRegion* hr = cset_chooser()->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
                                  old_cset_region_length(), max_old_cset_length);
        break;
      }

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                  old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
                                      "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
                                      predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
                                    old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_old_time_ms += predicted_time_ms;
      cset_chooser()->pop(); // already have region via peek()
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser()->peek();
    }
    if (hr == NULL) {
      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min). "
                                "old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
                                old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
    }

    cset_chooser()->verify();
  }

  stop_incremental_cset_building();

  log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2fms",
                            old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}

void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
  if (TraceYoungGenTime) {
    _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
  }
}

void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
  if (TraceYoungGenTime) {
    _all_yield_times_ms.add(yield_time_ms);
  }
}

void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  if (TraceYoungGenTime) {
    _total.add(pause_time_ms);
    _other.add(pause_time_ms - phase_times->accounted_time_ms());
    _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
    _parallel.add(phase_times->cur_collection_par_time_ms());
    _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
    _satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
    _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
    _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
    _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
    _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));

    double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
                                 phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
                                 phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
                                 phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
                                 phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
                                 phase_times->average_time_ms(G1GCPhaseTimes::Termination);

    double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
    _parallel_other.add(parallel_other_time);
    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  }
}

void TraceYoungGenTimeData::increment_young_collection_count() {
  if (TraceYoungGenTime) {
    ++_young_pause_num;
  }
}

void TraceYoungGenTimeData::increment_mixed_collection_count() {
  if (TraceYoungGenTime) {
    ++_mixed_pause_num;
  }
}

void TraceYoungGenTimeData::print_summary(const char* str,
                                          const NumberSeq* seq) const {
  double sum = seq->sum();
  tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
                str, sum / 1000.0, seq->avg());
}

void TraceYoungGenTimeData::print_summary_sd(const char* str,
                                             const NumberSeq* seq) const {
  print_summary(str, seq);
  tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                "(num", seq->num(), seq->sd(), seq->maximum());
}

void TraceYoungGenTimeData::print() const {
  if (!TraceYoungGenTime) {
    return;
  }

  tty->print_cr("ALL PAUSES");
  print_summary_sd("   Total", &_total);
  tty->cr();
  tty->cr();
  tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  tty->cr();

  tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    tty->print_cr("none");
  } else {
    print_summary_sd("   Evacuation Pauses",
                     &_total);
    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
    print_summary("      Parallel Time", &_parallel);
    print_summary("         Ext Root Scanning", &_ext_root_scan);
    print_summary("         SATB Filtering", &_satb_filtering);
    print_summary("         Update RS", &_update_rs);
    print_summary("         Scan RS", &_scan_rs);
    print_summary("         Object Copy", &_obj_copy);
    print_summary("         Termination", &_termination);
    print_summary("         Parallel Other", &_parallel_other);
    print_summary("      Clear CT", &_clear_ct);
    print_summary("      Other", &_other);
  }
  tty->cr();

  tty->print_cr("MISC");
  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  print_summary_sd("   Yields", &_all_yield_times_ms);
}

void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
  if (TraceOldGenTime) {
    _all_full_gc_times.add(full_gc_time_ms);
  }
}

void TraceOldGenTimeData::print() const {
  if (!TraceOldGenTime) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    tty->print("\n%4d full_gcs: total time = %8.2f s",
               _all_full_gc_times.num(),
               _all_full_gc_times.sum() / 1000.0);
    tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    tty->print_cr("     [std. dev = %8.2f ms, max = %8.2f ms]",
                  _all_full_gc_times.sd(),
                  _all_full_gc_times.maximum());
  }
}
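// Illustrative output of TraceOldGenTimeData::print() above (assumed sample
// values: 3 full GCs totalling 2.4 s, std dev 10 ms, max 812 ms):
//
//    3 full_gcs: total time =     2.40 s (avg =   800.00ms).
//      [std. dev =    10.00 ms, max =   812.00 ms]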