/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

// Different defaults for different numbers of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _predictor(G1ConfidencePercent / 100.0),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _rs_lengths_prediction(0),
  _max_survivor_regions(0),

  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0),

  _bytes_allocated_in_old_since_last_gc(0),
  _ihop_control(NULL),
  _initial_mark_to_mixed() {

  // SurvRateGroups below must be initialized after the predictor because they
  // indirectly use it through this object passed to their constructor.
  _short_lived_surv_rate_group =
    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  _survivor_surv_rate_group =
    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
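  // By way of illustration (assuming the usual heuristic in
  // HeapRegion::setup_heap_region_size() of aiming for roughly 2048 regions):
  // with -Xms1g -Xmx16g the unaligned average heap size is 8.5g, and
  // 8.5g / 2048 ~= 4.25m, which would be rounded to a power-of-two region
  // size of 4m. The exact rounding and clamping live in heapRegion.cpp.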
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
  clear_ratio_check_data();

  _phase_times = new G1GCPhaseTimes(ParallelGCThreads);

  uint index = MIN2(ParallelGCThreads - 1, 7u);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _cost_scan_hcc_seq->add(0.0);
  _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
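  // (With nothing set on the command line, the logic above resolves to
  //  MaxGCPauseMillis == 200 and GCPauseIntervalMillis == 201, which
  //  trivially satisfies the consistency check below.)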
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to " UINTX_FORMAT, reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;
}

G1CollectorPolicy::~G1CollectorPolicy() {
  delete _ihop_control;
}

double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
  return _predictor.get_new_prediction(seq);
}

size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
  return (size_t)get_new_prediction(seq);
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in regions
  // given the number of heap regions depending on the kind of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }

  bool adaptive_young_list_length() const {
    return _adaptive_size;
  }
};

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (NewSize > MaxNewSize) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
" 368 "A new max generation size of " SIZE_FORMAT "k will be used.", 369 NewSize/K, MaxNewSize/K, NewSize/K); 370 } 371 MaxNewSize = NewSize; 372 } 373 374 if (FLAG_IS_CMDLINE(NewSize)) { 375 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), 376 1U); 377 if (FLAG_IS_CMDLINE(MaxNewSize)) { 378 _max_desired_young_length = 379 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 380 1U); 381 _sizer_kind = SizerMaxAndNewSize; 382 _adaptive_size = _min_desired_young_length == _max_desired_young_length; 383 } else { 384 _sizer_kind = SizerNewSizeOnly; 385 } 386 } else if (FLAG_IS_CMDLINE(MaxNewSize)) { 387 _max_desired_young_length = 388 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 389 1U); 390 _sizer_kind = SizerMaxNewSizeOnly; 391 } 392 } 393 394 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { 395 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100; 396 return MAX2(1U, default_value); 397 } 398 399 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { 400 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100; 401 return MAX2(1U, default_value); 402 } 403 404 void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { 405 assert(number_of_heap_regions > 0, "Heap must be initialized"); 406 407 switch (_sizer_kind) { 408 case SizerDefaults: 409 *min_young_length = calculate_default_min_length(number_of_heap_regions); 410 *max_young_length = calculate_default_max_length(number_of_heap_regions); 411 break; 412 case SizerNewSizeOnly: 413 *max_young_length = calculate_default_max_length(number_of_heap_regions); 414 *max_young_length = MAX2(*min_young_length, *max_young_length); 415 break; 416 case SizerMaxNewSizeOnly: 417 *min_young_length = calculate_default_min_length(number_of_heap_regions); 418 *min_young_length = MIN2(*min_young_length, *max_young_length); 419 break; 420 case SizerMaxAndNewSize: 421 // Do nothing. Values set on the command line, don't update them at runtime. 422 break; 423 case SizerNewRatio: 424 *min_young_length = number_of_heap_regions / (NewRatio + 1); 425 *max_young_length = *min_young_length; 426 break; 427 default: 428 ShouldNotReachHere(); 429 } 430 431 assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); 432 } 433 434 uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { 435 // We need to pass the desired values because recalculation may not update these 436 // values in some cases. 
  uint temp = _min_desired_young_length;
  uint result = _max_desired_young_length;
  recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
  return result;
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
                                   &_max_desired_young_length);
}

void G1CollectorPolicy::post_heap_initialize() {
  uintx max_regions = G1CollectedHeap::heap()->max_regions();
  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
  if (max_young_size != MaxNewSize) {
    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
  }

  _ihop_control = create_ihop_control();
}

void G1CollectorPolicy::initialize_flags() {
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();
  _collection_set = _g1->collection_set();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  _collection_set->start_incremental_building();
}

void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
  phase_times()->note_gc_start(num_active_workers);
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;

  // When copying, we will likely need more bytes free than is live in the region.
  // Add some safety margin to factor in the confidence of our guess, and the
  // natural expected waste.
  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
  // of the calculation: the lower the confidence, the more headroom.
  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
  // copying due to anticipated waste in the PLABs.
  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);

  if (expected_bytes_to_copy > free_bytes) {
    // end condition 3: out-of-space
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

uint G1CollectorPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(get_new_size_prediction(_rs_lengths_seq));
}

uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
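  // (For instance, with 4 recorded survivor regions and at least one eden
  //  region already allocated, this evaluates to 5 or more: the target can
  //  never be pushed below what is already committed to the young gen.)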
  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.

  uint desired_max_length = calculate_young_list_desired_max_length();

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  result.second = young_list_target_length;

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  result.first = young_list_target_length;
  return result;
}

uint G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                           uint base_min_length,
                                                           uint desired_min_length,
                                                           uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
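  // (E.g. base_min_length == 4 survivors, desired_min_length == 10 and
  //  desired_max_length == 50 give an eden search range of [6, 46]; the
  //  survivors' evacuation cost is folded into base_time_ms below instead.)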
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.
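      // (A short trace with hypothetical bounds [10, 50): probe 30 -> fits,
      //  min = 30; probe 40 -> too long, max = 40; probe 35 -> fits, min = 35;
      //  ... until max - min == 1 and min is the largest length that fits.)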

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
  guarantee(adaptive_young_list_length(), "should not call this otherwise");

  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(get_new_size_prediction(_rs_lengths_seq));
}

void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                          SurvRateGroup* surv_rate_group) {
  guarantee(surv_rate_group != NULL, "pre-condition");

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        log_error(gc, verify)("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
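  // Mark the start of the Full GC pause; record_full_collection_end() below
  // turns the elapsed time into a recent-GC-time sample.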
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So, no point in calculating this
  // every time we calculate / recalculate the target young length.
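  // (update_survivors_policy() is expected to derive the maximum number of
  //  survivor regions and the tenuring threshold from the current young list
  //  target, so running it once per pause is sufficient.)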
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set->reset_bytes_used_before();
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(verify_young_ages(), "region age verification");
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
  // We skip the marking phase.
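  // (If there is nothing worth reclaiming we will not do a last young-only
  //  gc, so the initial-mark-to-mixed interval being timed below can never
  //  complete; abort its tracking rather than record a bogus sample.)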
  if (!should_continue_with_reclaim) {
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_marking_window(false);
}

double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1CollectorPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1CollectorPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
         average_time_ms(G1GCPhaseTimes::ObjCopy) -
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

CollectionSetChooser* G1CollectorPolicy::cset_chooser() const {
  return _collection_set->cset_chooser();
}

bool G1CollectorPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ?
"Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", 997 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); 998 } 999 1000 return result; 1001 } 1002 1003 // Anything below that is considered to be zero 1004 #define MIN_TIMER_GRANULARITY 0.0000001 1005 1006 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { 1007 double end_time_sec = os::elapsedTime(); 1008 1009 size_t cur_used_bytes = _g1->used(); 1010 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 1011 bool last_pause_included_initial_mark = false; 1012 bool update_stats = !_g1->evacuation_failed(); 1013 1014 NOT_PRODUCT(_short_lived_surv_rate_group->print()); 1015 1016 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 1017 1018 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); 1019 if (last_pause_included_initial_mark) { 1020 record_concurrent_mark_init_end(0.0); 1021 } else { 1022 maybe_start_marking(); 1023 } 1024 1025 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 1026 if (app_time_ms < MIN_TIMER_GRANULARITY) { 1027 // This usually happens due to the timer not having the required 1028 // granularity. Some Linuxes are the usual culprits. 1029 // We'll just set it to something (arbitrarily) small. 1030 app_time_ms = 1.0; 1031 } 1032 1033 if (update_stats) { 1034 // We maintain the invariant that all objects allocated by mutator 1035 // threads will be allocated out of eden regions. So, we can use 1036 // the eden region number allocated since the previous GC to 1037 // calculate the application's allocate rate. The only exception 1038 // to that is humongous objects that are allocated separately. But 1039 // given that humongous object allocations do not really affect 1040 // either the pause's duration nor when the next pause will take 1041 // place we can safely ignore them here. 1042 uint regions_allocated = _collection_set->eden_region_length(); 1043 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 1044 _alloc_rate_ms_seq->add(alloc_rate_ms); 1045 1046 double interval_ms = 1047 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 1048 update_recent_gc_times(end_time_sec, pause_time_ms); 1049 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 1050 if (recent_avg_pause_time_ratio() < 0.0 || 1051 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 1052 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 1053 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 1054 if (_recent_avg_pause_time_ratio < 0.0) { 1055 _recent_avg_pause_time_ratio = 0.0; 1056 } else { 1057 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 1058 _recent_avg_pause_time_ratio = 1.0; 1059 } 1060 } 1061 1062 // Compute the ratio of just this last pause time to the entire time range stored 1063 // in the vectors. Comparing this pause to the entire range, rather than only the 1064 // most recent interval, has the effect of smoothing over a possible transient 'burst' 1065 // of more frequent pauses that don't really reflect a change in heap occupancy. 
    // This reduces the likelihood of a needless heap expansion being triggered.
    _last_pause_time_ratio =
      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");

    if (next_gc_should_be_mixed("start mixed GCs",
                                "do not start mixed GCs")) {
      collector_state()->set_gcs_are_young(false);
    } else {
      // We aborted the mixed GC phase early.
      abort_time_to_mixed_tracking();
    }

    collector_state()->set_last_young_gc(false);
  }

  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_gcs_are_young(true);

      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }
    _cost_scan_hcc_seq->add(cost_scan_hcc);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      if (collector_state()->last_gc_was_young()) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (collector_state()->last_gc_was_young()) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
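    // (I.e. the subtraction below is guarded so that a racy
    //  recorded_rs_lengths larger than _max_rs_lengths yields a diff of 0
    //  rather than wrapping around to a huge unsigned value.)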
    size_t rs_length_diff = 0;
    size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
    if (_max_rs_lengths > recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      if (collector_state()->in_marking_window()) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    if (_collection_set->young_region_length() > 0) {
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
                                               _collection_set->young_region_length());
    }

    if (_collection_set->old_region_length() > 0) {
      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
                                                   _collection_set->old_region_length());
    }

    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // IHOP control wants to know the expected young gen length if it were not
  // restrained by the heap reserve. Using the actual length would make the
  // prediction too small and limit the young gen every time we get to the
  // predicted target occupancy.
  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  update_ihop_prediction(app_time_ms / 1000.0,
                         _bytes_allocated_in_old_since_last_gc,
                         last_unrestrained_young_length * HeapRegion::GrainBytes);
  _bytes_allocated_in_old_since_last_gc = 0;

  _ihop_control->send_trace_event(_g1->gc_tracer_stw());

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
1210 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms", 1211 update_rs_time_goal_ms, scan_hcc_time_ms); 1212 1213 update_rs_time_goal_ms = 0; 1214 } else { 1215 update_rs_time_goal_ms -= scan_hcc_time_ms; 1216 } 1217 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, 1218 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), 1219 update_rs_time_goal_ms); 1220 1221 cset_chooser()->verify(); 1222 } 1223 1224 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { 1225 if (G1UseAdaptiveIHOP) { 1226 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, 1227 G1CollectedHeap::heap()->max_capacity(), 1228 &_predictor, 1229 G1ReservePercent, 1230 G1HeapWastePercent); 1231 } else { 1232 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, 1233 G1CollectedHeap::heap()->max_capacity()); 1234 } 1235 } 1236 1237 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s, 1238 size_t mutator_alloc_bytes, 1239 size_t young_gen_size) { 1240 // Always try to update IHOP prediction. Even evacuation failures give information 1241 // about e.g. whether to start IHOP earlier next time. 1242 1243 // Avoid using really small application times that might create samples with 1244 // very high or very low values. They may be caused by e.g. back-to-back gcs. 1245 double const min_valid_time = 1e-6; 1246 1247 bool report = false; 1248 1249 double marking_to_mixed_time = -1.0; 1250 if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) { 1251 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); 1252 assert(marking_to_mixed_time > 0.0, 1253 "Initial mark to mixed time must be larger than zero but is %.3f", 1254 marking_to_mixed_time); 1255 if (marking_to_mixed_time > min_valid_time) { 1256 _ihop_control->update_marking_length(marking_to_mixed_time); 1257 report = true; 1258 } 1259 } 1260 1261 // As an approximation for the young gc promotion rates during marking we use 1262 // all of them. In many applications there are only a few if any young gcs during 1263 // marking, which makes any prediction useless. This increases the accuracy of the 1264 // prediction. 1265 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { 1266 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); 1267 report = true; 1268 } 1269 1270 if (report) { 1271 report_ihop_statistics(); 1272 } 1273 } 1274 1275 void G1CollectorPolicy::report_ihop_statistics() { 1276 _ihop_control->print(); 1277 } 1278 1279 void G1CollectorPolicy::print_phases() { 1280 phase_times()->print(); 1281 } 1282 1283 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1284 double update_rs_processed_buffers, 1285 double goal_ms) { 1286 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1287 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1288 1289 if (G1UseAdaptiveConcRefinement) { 1290 const int k_gy = 3, k_gr = 6; 1291 const double inc_k = 1.1, dec_k = 0.9; 1292 1293 int g = cg1r->green_zone(); 1294 if (update_rs_time > goal_ms) { 1295 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

size_t G1CollectorPolicy::predict_rs_length_diff() const {
  return get_new_size_prediction(_rs_length_diff_seq);
}

double G1CollectorPolicy::predict_alloc_rate_ms() const {
  return get_new_prediction(_alloc_rate_ms_seq);
}

double G1CollectorPolicy::predict_cost_per_card_ms() const {
  return get_new_prediction(_cost_per_card_ms_seq);
}

double G1CollectorPolicy::predict_scan_hcc_ms() const {
  return get_new_prediction(_cost_scan_hcc_seq);
}

double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}

double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
  return get_new_prediction(_young_cards_per_entry_ratio_seq);
}

double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
    return predict_young_cards_per_entry_ratio();
  } else {
    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
  }
}

size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
}

size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
}

double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
  if (collector_state()->gcs_are_young()) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return predict_mixed_rs_scan_time_ms(card_num);
  }
}

double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
  if (collector_state()->during_concurrent_mark()) {
    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
  if (collector_state()->during_concurrent_mark()) {
    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
  }
}

double G1CollectorPolicy::predict_constant_other_time_ms() const {
  return get_new_prediction(_constant_other_time_ms_seq);
}

double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_remark_time_ms() const {
  return get_new_prediction(_concurrent_mark_remark_times_ms);
}

double G1CollectorPolicy::predict_cleanup_time_ms() const {
  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}

double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0,
            "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = get_new_prediction(seq);
  if (pred > 1.0) {
    pred = 1.0;
  }
  return pred;
}

double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
}

double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                       size_t scanned_cards) const {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (collector_state()->gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (hr->is_marked()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t)(hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}
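// Worked example for predict_bytes_to_copy() (invented numbers): an unmarked
// young region with used() == 8 MB and a predicted survival rate of 0.25 is
// expected to contribute about 2 MB of copying work to the next evacuation.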
double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                         bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

void G1CollectorPolicy::clear_ratio_check_data() {
  _ratio_over_threshold_count = 0;
  _ratio_over_threshold_sum = 0.0;
  _pauses_since_start = 0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _last_pause_time_ratio * 100.0;
  double threshold = _gc_overhead_perc;
  size_t expand_bytes = 0;

  // If the heap is at less than half its maximum size, scale the threshold
  // down, with a lower bound of 1. Thus the smaller the heap is, the more
  // likely it is to expand, though the scaling code will likely keep the
  // increase small.
  if (_g1->capacity() <= _g1->max_capacity() / 2) {
    threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
    threshold = MAX2(threshold, 1.0);
  }

  // If the last GC time ratio is over the threshold, increment the count of
  // times it has been exceeded, and add this ratio to the sum of exceeded
  // ratios.
  if (last_gc_overhead > threshold) {
    _ratio_over_threshold_count++;
    _ratio_over_threshold_sum += last_gc_overhead;
  }

  // Check if we've had enough GC time ratio checks that were over the
  // threshold to trigger an expansion. We'll also expand if we've
  // reached the end of the history buffer and the average of all entries
  // is still over the threshold. This indicates that a smaller number of
  // GCs were long enough to make the average exceed the threshold.
  bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
      (filled_history_buffer && (recent_gc_overhead > threshold))) {
    size_t min_expand_bytes = HeapRegion::GrainBytes;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    double scale_factor = 1.0;
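    // (E.g., with 1 GB uncommitted and the default
    // G1ExpandByPercentOfAvailable of 20, expand_bytes_via_pct is roughly
    // 200 MB; illustrative numbers only.)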
    // If the current size is less than 1/4 of the Initial heap size, expand
    // by half of the delta between the current and Initial sizes, i.e., grow
    // back quickly.
    //
    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
    // the available expansion space, whichever is smaller, as the base
    // expansion size. Then possibly scale this size according to how much the
    // threshold has (on average) been exceeded by. If the delta is small
    // (less than the StartScaleDownAt value), scale the size down linearly, but
    // not by less than MinScaleDownFactor. If the delta is large (greater than
    // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
    // times the base size. The scaling will be linear in the range from
    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
    // ScaleUpRange sets the rate of scaling up.
    if (committed_bytes < InitialHeapSize / 4) {
      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
    } else {
      double const MinScaleDownFactor = 0.2;
      double const MaxScaleUpFactor = 2;
      double const StartScaleDownAt = _gc_overhead_perc;
      double const StartScaleUpAt = _gc_overhead_perc * 1.5;
      double const ScaleUpRange = _gc_overhead_perc * 2.0;

      double ratio_delta;
      if (filled_history_buffer) {
        ratio_delta = recent_gc_overhead - threshold;
      } else {
        ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
      }

      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
      if (ratio_delta < StartScaleDownAt) {
        scale_factor = ratio_delta / StartScaleDownAt;
        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
      } else if (ratio_delta > StartScaleUpAt) {
        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
      }
    }

    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B "
                              "base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

    // Ensure the expansion size is at least the minimum growth amount
    // and at most the remaining uncommitted byte size.
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    clear_ratio_check_data();
  } else {
    // An expansion was not triggered. If we've started counting, increment
    // the number of checks we've made in the current window. If we've
    // reached the end of the window without resizing, clear the counters to
    // start again the next time we see a ratio above the threshold.
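    // (Descriptive note: the counting window is NumPrevPausesForHeuristics
    // pauses long; it opens at the first over-threshold ratio and, if it
    // closes without an expansion, the counters below reset so that a later
    // over-threshold ratio starts a fresh window.)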
    if (_ratio_over_threshold_count > 0) {
      _pauses_since_start++;
      if (_pauses_since_start > NumPrevPausesForHeuristics) {
        clear_ratio_check_data();
      }
    }
  }

  return expand_bytes;
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

bool G1CollectorPolicy::is_young_list_full() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length >= young_list_target_length;
}

bool G1CollectorPolicy::can_expand_young_list() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

bool G1CollectorPolicy::adaptive_young_list_length() const {
  return _young_gen_sizer->adaptive_young_list_length();
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
    (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
      HeapRegion::GrainWords * _max_survivor_regions, counters());
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    return false;
  }
}

void G1CollectorPolicy::initiate_conc_mark() {
  collector_state()->set_during_initial_mark_pause(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}
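// Hypothetical call-site sketch for force_initial_mark_if_outside_cycle()
// above (illustrative only; the real callers live elsewhere, e.g. in
// G1CollectedHeap):
//
//   if (policy->force_initial_mark_if_outside_cycle(gc_cause)) {
//     // the next young pause will also serve as the initial-mark pause
//   }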
void G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->during_initial_mark_pause() should not already
  // be set. We will set it here if we have to. However, it should be cleared
  // by the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
      // Initiate a user requested initial mark. An initial mark must be a
      // young-only GC, so the collector state must be updated to reflect this.
      collector_state()->set_gcs_are_young(true);
      collector_state()->set_last_young_gc(false);

      abort_time_to_mixed_tracking();
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we started one right now, the two cycles
      // would overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now would be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}
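// Minimal sketch of the HeapRegionClosure pattern used below (illustrative
// only, not part of this file): doHeapRegion() is invoked once per region,
// and returning false means "continue the iteration".
//
//   class CountMarkedRegionsClosure : public HeapRegionClosure {
//     uint _count;
//    public:
//     CountMarkedRegionsClosure() : _count(0) { }
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->is_marked()) {
//         _count++;
//       }
//       return false; // never abort the iteration early
//     }
//     uint count() const { return _count; }
//   };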
class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool doHeapRegion(HeapRegion* r) {
    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _cset_updater.add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1;
  HeapRegionClaimer _hrclaimer;

public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
    _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
  }
};

uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
  cset_chooser()->clear();

  WorkGang* workers = _g1->workers();
  uint n_workers = workers->active_workers();

  uint n_regions = _g1->num_regions();
  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
  ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  cset_chooser()->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  // Returns the given amount of reclaimable bytes (i.e., the amount of
  // reclaimable space still to be collected) as a percentage of the
  // current heap capacity.
  size_t capacity_bytes = _g1->capacity();
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}
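// Worked example for reclaimable_bytes_perc() (invented numbers): with a
// 4 GB committed heap and 512 MB still reclaimable in candidate regions,
// the result is 512 / 4096 * 100 = 12.5, which would exceed a
// G1HeapWastePercent of, say, 5 and therefore keep mixed gcs going.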
void G1CollectorPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
  assert(!collector_state()->full_collection(), "must be");
  if (collector_state()->during_initial_mark_pause()) {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return InitialMarkGC;
  } else if (collector_state()->last_young_gc()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(collector_state()->last_gc_was_young(), "must be");
    return LastYoungGC;
  } else if (!collector_state()->last_gc_was_young()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return MixedGC;
  } else {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return YoungOnlyGC;
  }
}

void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed gc.
  switch (kind) {
    case FullGC:
      abort_time_to_mixed_tracking();
      break;
    case Cleanup:
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      _initial_mark_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
      _initial_mark_to_mixed.record_initial_mark_end(end);
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}

void G1CollectorPolicy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}
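// Illustrative timeline for the tracking above (all numbers invented, and
// assuming the tracker nets out pause time): initial mark ends at t = 10.0 s,
// two young pauses of 30 ms and 40 ms are recorded via add_pause(), and the
// first mixed gc starts at t = 12.0 s. The initial-mark-to-mixed interval is
// then roughly the 2.0 s of wall time less the 0.07 s spent in pauses, which
// update_ihop_prediction() feeds to the IHOP control as the marking length.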
bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) const {
  if (cset_chooser()->is_empty()) {
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
  return true;
}

uint G1CollectorPolicy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) cset_chooser()->length();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1CollectorPolicy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}

void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
  _collection_set->finalize_old_part(time_remaining_ms);
}
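// The "emulate ceiling" idiom in the two calc_*_old_cset_length() helpers
// above can also be written as a single expression; a minimal sketch of a
// hypothetical helper (not used in this file), valid as long as
// num + den - 1 does not overflow size_t:
//
//   static size_t ceil_div(size_t num, size_t den) {
//     assert(den > 0, "pre-condition");
//     return (num + den - 1) / den;
//   }
//
// E.g., calc_min_old_cset_length() would then be ceil_div(region_num, gc_num).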