1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/concurrentG1Refine.hpp" 27 #include "gc/g1/concurrentMarkThread.inline.hpp" 28 #include "gc/g1/g1CollectedHeap.inline.hpp" 29 #include "gc/g1/g1CollectionSet.hpp" 30 #include "gc/g1/g1CollectorPolicy.hpp" 31 #include "gc/g1/g1ConcurrentMark.hpp" 32 #include "gc/g1/g1IHOPControl.hpp" 33 #include "gc/g1/g1GCPhaseTimes.hpp" 34 #include "gc/g1/g1YoungGenSizer.hpp" 35 #include "gc/g1/heapRegion.inline.hpp" 36 #include "gc/g1/heapRegionRemSet.hpp" 37 #include "gc/shared/gcPolicyCounters.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/mutexLocker.hpp" 41 #include "utilities/debug.hpp" 42 #include "utilities/pair.hpp" 43 44 // Different defaults for different number of GC threads 45 // They were chosen by running GCOld and SPECjbb on debris with different 46 // numbers of GC threads and choosing them based on the results 47 48 // all the same 49 static double rs_length_diff_defaults[] = { 50 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 51 }; 52 53 static double cost_per_card_ms_defaults[] = { 54 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 55 }; 56 57 // all the same 58 static double young_cards_per_entry_ratio_defaults[] = { 59 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 60 }; 61 62 static double cost_per_entry_ms_defaults[] = { 63 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 64 }; 65 66 static double cost_per_byte_ms_defaults[] = { 67 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 68 }; 69 70 // these should be pretty consistent 71 static double constant_other_time_ms_defaults[] = { 72 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 73 }; 74 75 76 static double young_other_cost_per_region_ms_defaults[] = { 77 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 78 }; 79 80 static double non_young_other_cost_per_region_ms_defaults[] = { 81 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 82 }; 83 84 G1CollectorPolicy::G1CollectorPolicy() : 85 _predictor(G1ConfidencePercent / 100.0), 86 87 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 88 89 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 90 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 91 92 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 93 _prev_collection_pause_end_ms(0.0), 94 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 95 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 96 _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)), 97 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 98 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 99 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 100 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 101 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), 103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 105 _non_young_other_cost_per_region_ms_seq( 106 new TruncatedSeq(TruncatedSeqLength)), 107 108 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 109 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), 110 111 _pause_time_target_ms((double) MaxGCPauseMillis), 112 113 _recent_prev_end_times_for_all_gcs_sec( 114 new TruncatedSeq(NumPrevPausesForHeuristics)), 115 116 _recent_avg_pause_time_ratio(0.0), 117 _rs_lengths_prediction(0), 118 _max_survivor_regions(0), 119 120 // add here any more surv rate groups 121 _survivors_age_table(true), 122 123 _gc_overhead_perc(0.0), 124 125 _bytes_allocated_in_old_since_last_gc(0), 126 _ihop_control(NULL), 127 _initial_mark_to_mixed() { 128 129 // SurvRateGroups below must be initialized after the predictor because they 130 // indirectly use it through this object passed to their constructor. 131 _short_lived_surv_rate_group = 132 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary); 133 _survivor_surv_rate_group = 134 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary); 135 136 // Set up the region size and associated fields. Given that the 137 // policy is created before the heap, we have to set this up here, 138 // so it's done as soon as possible. 139 140 // It would have been natural to pass initial_heap_byte_size() and 141 // max_heap_byte_size() to setup_heap_region_size() but those have 142 // not been set up at this point since they should be aligned with 143 // the region size. So, there is a circular dependency here. We base 144 // the region size on the heap size, but the heap size should be 145 // aligned with the region size. To get around this we use the 146 // unaligned values for the heap. 147 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); 148 HeapRegionRemSet::setup_remset_size(); 149 150 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 151 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 152 clear_ratio_check_data(); 153 154 _phase_times = new G1GCPhaseTimes(ParallelGCThreads); 155 156 int index = MIN2(ParallelGCThreads - 1, 7u); 157 158 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 159 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 160 _cost_scan_hcc_seq->add(0.0); 161 _young_cards_per_entry_ratio_seq->add( 162 young_cards_per_entry_ratio_defaults[index]); 163 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 164 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 165 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 166 _young_other_cost_per_region_ms_seq->add( 167 young_other_cost_per_region_ms_defaults[index]); 168 _non_young_other_cost_per_region_ms_seq->add( 169 non_young_other_cost_per_region_ms_defaults[index]); 170 171 // Below, we might need to calculate the pause time target based on 172 // the pause interval. When we do so we are going to give G1 maximum 173 // flexibility and allow it to do pauses when it needs to. So, we'll 174 // arrange that the pause interval to be pause time target + 1 to 175 // ensure that a) the pause time target is maximized with respect to 176 // the pause interval and b) we maintain the invariant that pause 177 // time target < pause interval. If the user does not want this 178 // maximum flexibility, they will have to set the pause interval 179 // explicitly. 180 181 // First make sure that, if either parameter is set, its value is 182 // reasonable. 183 guarantee(MaxGCPauseMillis >= 1, "Range checking for MaxGCPauseMillis should guarantee that value is >= 1"); 184 185 // Then, if the pause time target parameter was not set, set it to 186 // the default value. 187 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 188 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 189 // The default pause time target in G1 is 200ms 190 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 191 } else { 192 // We do not allow the pause interval to be set without the 193 // pause time target 194 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " 195 "without setting MaxGCPauseMillis"); 196 } 197 } 198 199 // Then, if the interval parameter was not set, set it according to 200 // the pause time target (this will also deal with the case when the 201 // pause time target is the default value). 202 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 203 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); 204 } 205 guarantee(GCPauseIntervalMillis >= 1, "Constraint for GCPauseIntervalMillis should guarantee that value is >= 1"); 206 guarantee(GCPauseIntervalMillis > MaxGCPauseMillis, "Constraint for GCPauseIntervalMillis should guarantee that GCPauseIntervalMillis > MaxGCPauseMillis"); 207 208 double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 209 double time_slice = (double) GCPauseIntervalMillis / 1000.0; 210 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); 211 212 // start conservatively (around 50ms is about right) 213 _concurrent_mark_remark_times_ms->add(0.05); 214 _concurrent_mark_cleanup_times_ms->add(0.20); 215 _tenuring_threshold = MaxTenuringThreshold; 216 217 assert(GCTimeRatio > 0, 218 "we should have set it to a default value set_g1_gc_flags() " 219 "if a user set it to 0"); 220 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); 221 222 guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50."); 223 _reserve_factor = (double) G1ReservePercent / 100.0; 224 // This will be set when the heap is expanded 225 // for the first time during initialization. 226 _reserve_regions = 0; 227 228 _ihop_control = create_ihop_control(); 229 } 230 231 G1CollectorPolicy::~G1CollectorPolicy() { 232 delete _ihop_control; 233 } 234 235 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const { 236 return _predictor.get_new_prediction(seq); 237 } 238 239 size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const { 240 return (size_t)get_new_prediction(seq); 241 } 242 243 void G1CollectorPolicy::initialize_alignments() { 244 _space_alignment = HeapRegion::GrainBytes; 245 size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint(); 246 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); 247 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); 248 } 249 250 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); } 251 252 void G1CollectorPolicy::post_heap_initialize() { 253 uintx max_regions = G1CollectedHeap::heap()->max_regions(); 254 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; 255 if (max_young_size != MaxNewSize) { 256 FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size); 257 } 258 } 259 260 void G1CollectorPolicy::initialize_flags() { 261 if (G1HeapRegionSize != HeapRegion::GrainBytes) { 262 FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes); 263 } 264 265 guarantee(SurvivorRatio >= 1, "Range checking for SurvivorRatio should guarantee that value is >= 1"); 266 267 CollectorPolicy::initialize_flags(); 268 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags 269 } 270 271 272 void G1CollectorPolicy::init() { 273 // Set aside an initial future to_space. 274 _g1 = G1CollectedHeap::heap(); 275 _collection_set = _g1->collection_set(); 276 _collection_set->set_policy(this); 277 278 assert(Heap_lock->owned_by_self(), "Locking discipline."); 279 280 initialize_gc_policy_counters(); 281 282 if (adaptive_young_list_length()) { 283 _young_list_fixed_length = 0; 284 } else { 285 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); 286 } 287 _free_regions_at_end_of_collection = _g1->num_free_regions(); 288 289 update_young_list_max_and_target_length(); 290 // We may immediately start allocating regions and placing them on the 291 // collection set list. Initialize the per-collection set info 292 _collection_set->start_incremental_building(); 293 } 294 295 void G1CollectorPolicy::note_gc_start(uint num_active_workers) { 296 phase_times()->note_gc_start(num_active_workers); 297 } 298 299 // Create the jstat counters for the policy. 300 void G1CollectorPolicy::initialize_gc_policy_counters() { 301 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); 302 } 303 304 bool G1CollectorPolicy::predict_will_fit(uint young_length, 305 double base_time_ms, 306 uint base_free_regions, 307 double target_pause_time_ms) const { 308 if (young_length >= base_free_regions) { 309 // end condition 1: not enough space for the young regions 310 return false; 311 } 312 313 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); 314 size_t bytes_to_copy = 315 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); 316 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); 317 double young_other_time_ms = predict_young_other_time_ms(young_length); 318 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; 319 if (pause_time_ms > target_pause_time_ms) { 320 // end condition 2: prediction is over the target pause time 321 return false; 322 } 323 324 size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes; 325 326 // When copying, we will likely need more bytes free than is live in the region. 327 // Add some safety margin to factor in the confidence of our guess, and the 328 // natural expected waste. 329 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty 330 // of the calculation: the lower the confidence, the more headroom. 331 // (100 + TargetPLABWastePct) represents the increase in expected bytes during 332 // copying due to anticipated waste in the PLABs. 333 double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0; 334 size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy); 335 336 if (expected_bytes_to_copy > free_bytes) { 337 // end condition 3: out-of-space 338 return false; 339 } 340 341 // success! 342 return true; 343 } 344 345 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { 346 // re-calculate the necessary reserve 347 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; 348 // We use ceiling so that if reserve_regions_d is > 0.0 (but 349 // smaller than 1.0) we'll get 1. 350 _reserve_regions = (uint) ceil(reserve_regions_d); 351 352 _young_gen_sizer->heap_size_changed(new_number_of_regions); 353 354 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes); 355 } 356 357 uint G1CollectorPolicy::calculate_young_list_desired_min_length( 358 uint base_min_length) const { 359 uint desired_min_length = 0; 360 if (adaptive_young_list_length()) { 361 if (_alloc_rate_ms_seq->num() > 3) { 362 double now_sec = os::elapsedTime(); 363 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 364 double alloc_rate_ms = predict_alloc_rate_ms(); 365 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); 366 } else { 367 // otherwise we don't have enough info to make the prediction 368 } 369 } 370 desired_min_length += base_min_length; 371 // make sure we don't go below any user-defined minimum bound 372 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); 373 } 374 375 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const { 376 // Here, we might want to also take into account any additional 377 // constraints (i.e., user-defined minimum bound). Currently, we 378 // effectively don't set this bound. 379 return _young_gen_sizer->max_desired_young_length(); 380 } 381 382 uint G1CollectorPolicy::update_young_list_max_and_target_length() { 383 return update_young_list_max_and_target_length(predict_rs_lengths()); 384 } 385 386 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) { 387 uint unbounded_target_length = update_young_list_target_length(rs_lengths); 388 update_max_gc_locker_expansion(); 389 return unbounded_target_length; 390 } 391 392 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { 393 YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths); 394 _young_list_target_length = young_lengths.first; 395 return young_lengths.second; 396 } 397 398 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const { 399 YoungTargetLengths result; 400 401 // Calculate the absolute and desired min bounds first. 402 403 // This is how many young regions we already have (currently: the survivors). 404 const uint base_min_length = _g1->young_list()->survivor_length(); 405 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); 406 // This is the absolute minimum young length. Ensure that we 407 // will at least have one eden region available for allocation. 408 uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1); 409 // If we shrank the young list target it should not shrink below the current size. 410 desired_min_length = MAX2(desired_min_length, absolute_min_length); 411 // Calculate the absolute and desired max bounds. 412 413 uint desired_max_length = calculate_young_list_desired_max_length(); 414 415 uint young_list_target_length = 0; 416 if (adaptive_young_list_length()) { 417 if (collector_state()->gcs_are_young()) { 418 young_list_target_length = 419 calculate_young_list_target_length(rs_lengths, 420 base_min_length, 421 desired_min_length, 422 desired_max_length); 423 } else { 424 // Don't calculate anything and let the code below bound it to 425 // the desired_min_length, i.e., do the next GC as soon as 426 // possible to maximize how many old regions we can add to it. 427 } 428 } else { 429 // The user asked for a fixed young gen so we'll fix the young gen 430 // whether the next GC is young or mixed. 431 young_list_target_length = _young_list_fixed_length; 432 } 433 434 result.second = young_list_target_length; 435 436 // We will try our best not to "eat" into the reserve. 437 uint absolute_max_length = 0; 438 if (_free_regions_at_end_of_collection > _reserve_regions) { 439 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; 440 } 441 if (desired_max_length > absolute_max_length) { 442 desired_max_length = absolute_max_length; 443 } 444 445 // Make sure we don't go over the desired max length, nor under the 446 // desired min length. In case they clash, desired_min_length wins 447 // which is why that test is second. 448 if (young_list_target_length > desired_max_length) { 449 young_list_target_length = desired_max_length; 450 } 451 if (young_list_target_length < desired_min_length) { 452 young_list_target_length = desired_min_length; 453 } 454 455 assert(young_list_target_length > base_min_length, 456 "we should be able to allocate at least one eden region"); 457 assert(young_list_target_length >= absolute_min_length, "post-condition"); 458 459 result.first = young_list_target_length; 460 return result; 461 } 462 463 uint 464 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, 465 uint base_min_length, 466 uint desired_min_length, 467 uint desired_max_length) const { 468 assert(adaptive_young_list_length(), "pre-condition"); 469 assert(collector_state()->gcs_are_young(), "only call this for young GCs"); 470 471 // In case some edge-condition makes the desired max length too small... 472 if (desired_max_length <= desired_min_length) { 473 return desired_min_length; 474 } 475 476 // We'll adjust min_young_length and max_young_length not to include 477 // the already allocated young regions (i.e., so they reflect the 478 // min and max eden regions we'll allocate). The base_min_length 479 // will be reflected in the predictions by the 480 // survivor_regions_evac_time prediction. 481 assert(desired_min_length > base_min_length, "invariant"); 482 uint min_young_length = desired_min_length - base_min_length; 483 assert(desired_max_length > base_min_length, "invariant"); 484 uint max_young_length = desired_max_length - base_min_length; 485 486 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 487 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); 488 size_t pending_cards = get_new_size_prediction(_pending_cards_seq); 489 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); 490 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); 491 double base_time_ms = 492 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + 493 survivor_regions_evac_time; 494 uint available_free_regions = _free_regions_at_end_of_collection; 495 uint base_free_regions = 0; 496 if (available_free_regions > _reserve_regions) { 497 base_free_regions = available_free_regions - _reserve_regions; 498 } 499 500 // Here, we will make sure that the shortest young length that 501 // makes sense fits within the target pause time. 502 503 if (predict_will_fit(min_young_length, base_time_ms, 504 base_free_regions, target_pause_time_ms)) { 505 // The shortest young length will fit into the target pause time; 506 // we'll now check whether the absolute maximum number of young 507 // regions will fit in the target pause time. If not, we'll do 508 // a binary search between min_young_length and max_young_length. 509 if (predict_will_fit(max_young_length, base_time_ms, 510 base_free_regions, target_pause_time_ms)) { 511 // The maximum young length will fit into the target pause time. 512 // We are done so set min young length to the maximum length (as 513 // the result is assumed to be returned in min_young_length). 514 min_young_length = max_young_length; 515 } else { 516 // The maximum possible number of young regions will not fit within 517 // the target pause time so we'll search for the optimal 518 // length. The loop invariants are: 519 // 520 // min_young_length < max_young_length 521 // min_young_length is known to fit into the target pause time 522 // max_young_length is known not to fit into the target pause time 523 // 524 // Going into the loop we know the above hold as we've just 525 // checked them. Every time around the loop we check whether 526 // the middle value between min_young_length and 527 // max_young_length fits into the target pause time. If it 528 // does, it becomes the new min. If it doesn't, it becomes 529 // the new max. This way we maintain the loop invariants. 530 531 assert(min_young_length < max_young_length, "invariant"); 532 uint diff = (max_young_length - min_young_length) / 2; 533 while (diff > 0) { 534 uint young_length = min_young_length + diff; 535 if (predict_will_fit(young_length, base_time_ms, 536 base_free_regions, target_pause_time_ms)) { 537 min_young_length = young_length; 538 } else { 539 max_young_length = young_length; 540 } 541 assert(min_young_length < max_young_length, "invariant"); 542 diff = (max_young_length - min_young_length) / 2; 543 } 544 // The results is min_young_length which, according to the 545 // loop invariants, should fit within the target pause time. 546 547 // These are the post-conditions of the binary search above: 548 assert(min_young_length < max_young_length, 549 "otherwise we should have discovered that max_young_length " 550 "fits into the pause target and not done the binary search"); 551 assert(predict_will_fit(min_young_length, base_time_ms, 552 base_free_regions, target_pause_time_ms), 553 "min_young_length, the result of the binary search, should " 554 "fit into the pause target"); 555 assert(!predict_will_fit(min_young_length + 1, base_time_ms, 556 base_free_regions, target_pause_time_ms), 557 "min_young_length, the result of the binary search, should be " 558 "optimal, so no larger length should fit into the pause target"); 559 } 560 } else { 561 // Even the minimum length doesn't fit into the pause time 562 // target, return it as the result nevertheless. 563 } 564 return base_min_length + min_young_length; 565 } 566 567 double G1CollectorPolicy::predict_survivor_regions_evac_time() const { 568 double survivor_regions_evac_time = 0.0; 569 for (HeapRegion * r = _g1->young_list()->first_survivor_region(); 570 r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region(); 571 r = r->get_next_young_region()) { 572 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young()); 573 } 574 return survivor_regions_evac_time; 575 } 576 577 void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) { 578 guarantee( adaptive_young_list_length(), "should not call this otherwise" ); 579 580 if (rs_lengths > _rs_lengths_prediction) { 581 // add 10% to avoid having to recalculate often 582 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; 583 update_rs_lengths_prediction(rs_lengths_prediction); 584 585 update_young_list_max_and_target_length(rs_lengths_prediction); 586 } 587 } 588 589 void G1CollectorPolicy::update_rs_lengths_prediction() { 590 update_rs_lengths_prediction(predict_rs_lengths()); 591 } 592 593 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) { 594 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) { 595 _rs_lengths_prediction = prediction; 596 } 597 } 598 599 #ifndef PRODUCT 600 bool G1CollectorPolicy::verify_young_ages() { 601 HeapRegion* head = _g1->young_list()->first_region(); 602 return 603 verify_young_ages(head, _short_lived_surv_rate_group); 604 // also call verify_young_ages on any additional surv rate groups 605 } 606 607 bool 608 G1CollectorPolicy::verify_young_ages(HeapRegion* head, 609 SurvRateGroup *surv_rate_group) { 610 guarantee( surv_rate_group != NULL, "pre-condition" ); 611 612 const char* name = surv_rate_group->name(); 613 bool ret = true; 614 int prev_age = -1; 615 616 for (HeapRegion* curr = head; 617 curr != NULL; 618 curr = curr->get_next_young_region()) { 619 SurvRateGroup* group = curr->surv_rate_group(); 620 if (group == NULL && !curr->is_survivor()) { 621 log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name); 622 ret = false; 623 } 624 625 if (surv_rate_group == group) { 626 int age = curr->age_in_surv_rate_group(); 627 628 if (age < 0) { 629 log_error(gc, verify)("## %s: encountered negative age", name); 630 ret = false; 631 } 632 633 if (age <= prev_age) { 634 log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age); 635 ret = false; 636 } 637 prev_age = age; 638 } 639 } 640 641 return ret; 642 } 643 #endif // PRODUCT 644 645 void G1CollectorPolicy::record_full_collection_start() { 646 _full_collection_start_sec = os::elapsedTime(); 647 // Release the future to-space so that it is available for compaction into. 648 collector_state()->set_full_collection(true); 649 } 650 651 void G1CollectorPolicy::record_full_collection_end() { 652 // Consider this like a collection pause for the purposes of allocation 653 // since last pause. 654 double end_sec = os::elapsedTime(); 655 double full_gc_time_sec = end_sec - _full_collection_start_sec; 656 double full_gc_time_ms = full_gc_time_sec * 1000.0; 657 658 update_recent_gc_times(end_sec, full_gc_time_ms); 659 660 collector_state()->set_full_collection(false); 661 662 // "Nuke" the heuristics that control the young/mixed GC 663 // transitions and make sure we start with young GCs after the Full GC. 664 collector_state()->set_gcs_are_young(true); 665 collector_state()->set_last_young_gc(false); 666 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); 667 collector_state()->set_during_initial_mark_pause(false); 668 collector_state()->set_in_marking_window(false); 669 collector_state()->set_in_marking_window_im(false); 670 671 _short_lived_surv_rate_group->start_adding_regions(); 672 // also call this on any additional surv rate groups 673 674 _free_regions_at_end_of_collection = _g1->num_free_regions(); 675 // Reset survivors SurvRateGroup. 676 _survivor_surv_rate_group->reset(); 677 update_young_list_max_and_target_length(); 678 update_rs_lengths_prediction(); 679 cset_chooser()->clear(); 680 681 _bytes_allocated_in_old_since_last_gc = 0; 682 683 record_pause(FullGC, _full_collection_start_sec, end_sec); 684 } 685 686 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { 687 // We only need to do this here as the policy will only be applied 688 // to the GC we're about to start. so, no point is calculating this 689 // every time we calculate / recalculate the target young length. 690 update_survivors_policy(); 691 692 assert(_g1->used() == _g1->recalculate_used(), 693 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT, 694 _g1->used(), _g1->recalculate_used()); 695 696 phase_times()->record_cur_collection_start_sec(start_time_sec); 697 _pending_cards = _g1->pending_card_num(); 698 699 _collection_set->reset_bytes_used_before(); 700 _bytes_copied_during_gc = 0; 701 702 collector_state()->set_last_gc_was_young(false); 703 704 // do that for any other surv rate groups 705 _short_lived_surv_rate_group->stop_adding_regions(); 706 _survivors_age_table.clear(); 707 708 assert( verify_young_ages(), "region age verification" ); 709 } 710 711 void G1CollectorPolicy::record_concurrent_mark_init_end(double 712 mark_init_elapsed_time_ms) { 713 collector_state()->set_during_marking(true); 714 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); 715 collector_state()->set_during_initial_mark_pause(false); 716 } 717 718 void G1CollectorPolicy::record_concurrent_mark_remark_start() { 719 _mark_remark_start_sec = os::elapsedTime(); 720 collector_state()->set_during_marking(false); 721 } 722 723 void G1CollectorPolicy::record_concurrent_mark_remark_end() { 724 double end_time_sec = os::elapsedTime(); 725 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 726 _concurrent_mark_remark_times_ms->add(elapsed_time_ms); 727 _prev_collection_pause_end_ms += elapsed_time_ms; 728 729 record_pause(Remark, _mark_remark_start_sec, end_time_sec); 730 } 731 732 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { 733 _mark_cleanup_start_sec = os::elapsedTime(); 734 } 735 736 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { 737 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc", 738 "skip last young-only gc"); 739 collector_state()->set_last_young_gc(should_continue_with_reclaim); 740 // We skip the marking phase. 741 if (!should_continue_with_reclaim) { 742 abort_time_to_mixed_tracking(); 743 } 744 collector_state()->set_in_marking_window(false); 745 } 746 747 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { 748 return phase_times()->average_time_ms(phase); 749 } 750 751 double G1CollectorPolicy::young_other_time_ms() const { 752 return phase_times()->young_cset_choice_time_ms() + 753 phase_times()->young_free_cset_time_ms(); 754 } 755 756 double G1CollectorPolicy::non_young_other_time_ms() const { 757 return phase_times()->non_young_cset_choice_time_ms() + 758 phase_times()->non_young_free_cset_time_ms(); 759 760 } 761 762 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const { 763 return pause_time_ms - 764 average_time_ms(G1GCPhaseTimes::UpdateRS) - 765 average_time_ms(G1GCPhaseTimes::ScanRS) - 766 average_time_ms(G1GCPhaseTimes::ObjCopy) - 767 average_time_ms(G1GCPhaseTimes::Termination); 768 } 769 770 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const { 771 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms(); 772 } 773 774 CollectionSetChooser* G1CollectorPolicy::cset_chooser() const { 775 return _collection_set->cset_chooser(); 776 } 777 778 bool G1CollectorPolicy::about_to_start_mixed_phase() const { 779 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc(); 780 } 781 782 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { 783 if (about_to_start_mixed_phase()) { 784 return false; 785 } 786 787 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); 788 789 size_t cur_used_bytes = _g1->non_young_capacity_bytes(); 790 size_t alloc_byte_size = alloc_word_size * HeapWordSize; 791 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; 792 793 bool result = false; 794 if (marking_request_bytes > marking_initiating_used_threshold) { 795 result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc(); 796 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s", 797 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", 798 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); 799 } 800 801 return result; 802 } 803 804 // Anything below that is considered to be zero 805 #define MIN_TIMER_GRANULARITY 0.0000001 806 807 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { 808 double end_time_sec = os::elapsedTime(); 809 810 size_t cur_used_bytes = _g1->used(); 811 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 812 bool last_pause_included_initial_mark = false; 813 bool update_stats = !_g1->evacuation_failed(); 814 815 NOT_PRODUCT(_short_lived_surv_rate_group->print()); 816 817 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 818 819 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); 820 if (last_pause_included_initial_mark) { 821 record_concurrent_mark_init_end(0.0); 822 } else { 823 maybe_start_marking(); 824 } 825 826 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 827 if (app_time_ms < MIN_TIMER_GRANULARITY) { 828 // This usually happens due to the timer not having the required 829 // granularity. Some Linuxes are the usual culprits. 830 // We'll just set it to something (arbitrarily) small. 831 app_time_ms = 1.0; 832 } 833 834 if (update_stats) { 835 // We maintain the invariant that all objects allocated by mutator 836 // threads will be allocated out of eden regions. So, we can use 837 // the eden region number allocated since the previous GC to 838 // calculate the application's allocate rate. The only exception 839 // to that is humongous objects that are allocated separately. But 840 // given that humongous object allocations do not really affect 841 // either the pause's duration nor when the next pause will take 842 // place we can safely ignore them here. 843 uint regions_allocated = _collection_set->eden_region_length(); 844 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 845 _alloc_rate_ms_seq->add(alloc_rate_ms); 846 847 double interval_ms = 848 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 849 update_recent_gc_times(end_time_sec, pause_time_ms); 850 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 851 if (recent_avg_pause_time_ratio() < 0.0 || 852 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 853 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 854 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 855 if (_recent_avg_pause_time_ratio < 0.0) { 856 _recent_avg_pause_time_ratio = 0.0; 857 } else { 858 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 859 _recent_avg_pause_time_ratio = 1.0; 860 } 861 } 862 863 // Compute the ratio of just this last pause time to the entire time range stored 864 // in the vectors. Comparing this pause to the entire range, rather than only the 865 // most recent interval, has the effect of smoothing over a possible transient 'burst' 866 // of more frequent pauses that don't really reflect a change in heap occupancy. 867 // This reduces the likelihood of a needless heap expansion being triggered. 868 _last_pause_time_ratio = 869 (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; 870 } 871 872 bool new_in_marking_window = collector_state()->in_marking_window(); 873 bool new_in_marking_window_im = false; 874 if (last_pause_included_initial_mark) { 875 new_in_marking_window = true; 876 new_in_marking_window_im = true; 877 } 878 879 if (collector_state()->last_young_gc()) { 880 // This is supposed to to be the "last young GC" before we start 881 // doing mixed GCs. Here we decide whether to start mixed GCs or not. 882 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC"); 883 884 if (next_gc_should_be_mixed("start mixed GCs", 885 "do not start mixed GCs")) { 886 collector_state()->set_gcs_are_young(false); 887 } else { 888 // We aborted the mixed GC phase early. 889 abort_time_to_mixed_tracking(); 890 } 891 892 collector_state()->set_last_young_gc(false); 893 } 894 895 if (!collector_state()->last_gc_was_young()) { 896 // This is a mixed GC. Here we decide whether to continue doing 897 // mixed GCs or not. 898 if (!next_gc_should_be_mixed("continue mixed GCs", 899 "do not continue mixed GCs")) { 900 collector_state()->set_gcs_are_young(true); 901 902 maybe_start_marking(); 903 } 904 } 905 906 _short_lived_surv_rate_group->start_adding_regions(); 907 // Do that for any other surv rate groups 908 909 double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0; 910 911 if (update_stats) { 912 double cost_per_card_ms = 0.0; 913 if (_pending_cards > 0) { 914 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards; 915 _cost_per_card_ms_seq->add(cost_per_card_ms); 916 } 917 _cost_scan_hcc_seq->add(scan_hcc_time_ms); 918 919 double cost_per_entry_ms = 0.0; 920 if (cards_scanned > 10) { 921 cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; 922 if (collector_state()->last_gc_was_young()) { 923 _cost_per_entry_ms_seq->add(cost_per_entry_ms); 924 } else { 925 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); 926 } 927 } 928 929 if (_max_rs_lengths > 0) { 930 double cards_per_entry_ratio = 931 (double) cards_scanned / (double) _max_rs_lengths; 932 if (collector_state()->last_gc_was_young()) { 933 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 934 } else { 935 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 936 } 937 } 938 939 // This is defensive. For a while _max_rs_lengths could get 940 // smaller than _recorded_rs_lengths which was causing 941 // rs_length_diff to get very large and mess up the RSet length 942 // predictions. The reason was unsafe concurrent updates to the 943 // _inc_cset_recorded_rs_lengths field which the code below guards 944 // against (see CR 7118202). This bug has now been fixed (see CR 945 // 7119027). However, I'm still worried that 946 // _inc_cset_recorded_rs_lengths might still end up somewhat 947 // inaccurate. The concurrent refinement thread calculates an 948 // RSet's length concurrently with other CR threads updating it 949 // which might cause it to calculate the length incorrectly (if, 950 // say, it's in mid-coarsening). So I'll leave in the defensive 951 // conditional below just in case. 952 size_t rs_length_diff = 0; 953 size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths(); 954 if (_max_rs_lengths > recorded_rs_lengths) { 955 rs_length_diff = _max_rs_lengths - recorded_rs_lengths; 956 } 957 _rs_length_diff_seq->add((double) rs_length_diff); 958 959 size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes; 960 size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes; 961 double cost_per_byte_ms = 0.0; 962 963 if (copied_bytes > 0) { 964 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes; 965 if (collector_state()->in_marking_window()) { 966 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); 967 } else { 968 _cost_per_byte_ms_seq->add(cost_per_byte_ms); 969 } 970 } 971 972 if (_collection_set->young_region_length() > 0) { 973 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() / 974 _collection_set->young_region_length()); 975 } 976 977 if (_collection_set->old_region_length() > 0) { 978 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() / 979 _collection_set->old_region_length()); 980 } 981 982 _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms)); 983 984 _pending_cards_seq->add((double) _pending_cards); 985 _rs_lengths_seq->add((double) _max_rs_lengths); 986 } 987 988 collector_state()->set_in_marking_window(new_in_marking_window); 989 collector_state()->set_in_marking_window_im(new_in_marking_window_im); 990 _free_regions_at_end_of_collection = _g1->num_free_regions(); 991 // IHOP control wants to know the expected young gen length if it were not 992 // restrained by the heap reserve. Using the actual length would make the 993 // prediction too small and the limit the young gen every time we get to the 994 // predicted target occupancy. 995 size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); 996 update_rs_lengths_prediction(); 997 998 update_ihop_prediction(app_time_ms / 1000.0, 999 _bytes_allocated_in_old_since_last_gc, 1000 last_unrestrained_young_length * HeapRegion::GrainBytes); 1001 _bytes_allocated_in_old_since_last_gc = 0; 1002 1003 _ihop_control->send_trace_event(_g1->gc_tracer_stw()); 1004 1005 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 1006 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 1007 1008 if (update_rs_time_goal_ms < scan_hcc_time_ms) { 1009 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)." 1010 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms", 1011 update_rs_time_goal_ms, scan_hcc_time_ms); 1012 1013 update_rs_time_goal_ms = 0; 1014 } else { 1015 update_rs_time_goal_ms -= scan_hcc_time_ms; 1016 } 1017 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, 1018 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), 1019 update_rs_time_goal_ms); 1020 1021 cset_chooser()->verify(); 1022 } 1023 1024 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { 1025 if (G1UseAdaptiveIHOP) { 1026 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, 1027 &_predictor, 1028 G1ReservePercent, 1029 G1HeapWastePercent); 1030 } else { 1031 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); 1032 } 1033 } 1034 1035 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s, 1036 size_t mutator_alloc_bytes, 1037 size_t young_gen_size) { 1038 // Always try to update IHOP prediction. Even evacuation failures give information 1039 // about e.g. whether to start IHOP earlier next time. 1040 1041 // Avoid using really small application times that might create samples with 1042 // very high or very low values. They may be caused by e.g. back-to-back gcs. 1043 double const min_valid_time = 1e-6; 1044 1045 bool report = false; 1046 1047 double marking_to_mixed_time = -1.0; 1048 if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) { 1049 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); 1050 assert(marking_to_mixed_time > 0.0, 1051 "Initial mark to mixed time must be larger than zero but is %.3f", 1052 marking_to_mixed_time); 1053 if (marking_to_mixed_time > min_valid_time) { 1054 _ihop_control->update_marking_length(marking_to_mixed_time); 1055 report = true; 1056 } 1057 } 1058 1059 // As an approximation for the young gc promotion rates during marking we use 1060 // all of them. In many applications there are only a few if any young gcs during 1061 // marking, which makes any prediction useless. This increases the accuracy of the 1062 // prediction. 1063 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { 1064 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); 1065 report = true; 1066 } 1067 1068 if (report) { 1069 report_ihop_statistics(); 1070 } 1071 } 1072 1073 void G1CollectorPolicy::report_ihop_statistics() { 1074 _ihop_control->print(); 1075 } 1076 1077 void G1CollectorPolicy::print_phases() { 1078 phase_times()->print(); 1079 } 1080 1081 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1082 double update_rs_processed_buffers, 1083 double goal_ms) { 1084 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1085 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1086 1087 if (G1UseAdaptiveConcRefinement) { 1088 const int k_gy = 3, k_gr = 6; 1089 const double inc_k = 1.1, dec_k = 0.9; 1090 1091 size_t g = cg1r->green_zone(); 1092 if (update_rs_time > goal_ms) { 1093 g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 1094 } else { 1095 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { 1096 g = (size_t)MAX2(g * inc_k, g + 1.0); 1097 } 1098 } 1099 // Change the refinement threads params 1100 cg1r->set_green_zone(g); 1101 cg1r->set_yellow_zone(g * k_gy); 1102 cg1r->set_red_zone(g * k_gr); 1103 cg1r->reinitialize_threads(); 1104 1105 size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1); 1106 size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, 1107 cg1r->yellow_zone()); 1108 // Change the barrier params 1109 dcqs.set_process_completed_threshold((int)processing_threshold); 1110 dcqs.set_max_completed_queue((int)cg1r->red_zone()); 1111 } 1112 1113 size_t curr_queue_size = dcqs.completed_buffers_num(); 1114 if (curr_queue_size >= cg1r->yellow_zone()) { 1115 dcqs.set_completed_queue_padding(curr_queue_size); 1116 } else { 1117 dcqs.set_completed_queue_padding(0); 1118 } 1119 dcqs.notify_if_necessary(); 1120 } 1121 1122 size_t G1CollectorPolicy::predict_rs_lengths() const { 1123 return get_new_size_prediction(_rs_lengths_seq); 1124 } 1125 1126 size_t G1CollectorPolicy::predict_rs_length_diff() const { 1127 return get_new_size_prediction(_rs_length_diff_seq); 1128 } 1129 1130 double G1CollectorPolicy::predict_alloc_rate_ms() const { 1131 return get_new_prediction(_alloc_rate_ms_seq); 1132 } 1133 1134 double G1CollectorPolicy::predict_cost_per_card_ms() const { 1135 return get_new_prediction(_cost_per_card_ms_seq); 1136 } 1137 1138 double G1CollectorPolicy::predict_scan_hcc_ms() const { 1139 return get_new_prediction(_cost_scan_hcc_seq); 1140 } 1141 1142 double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const { 1143 return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms(); 1144 } 1145 1146 double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const { 1147 return get_new_prediction(_young_cards_per_entry_ratio_seq); 1148 } 1149 1150 double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const { 1151 if (_mixed_cards_per_entry_ratio_seq->num() < 2) { 1152 return predict_young_cards_per_entry_ratio(); 1153 } else { 1154 return get_new_prediction(_mixed_cards_per_entry_ratio_seq); 1155 } 1156 } 1157 1158 size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const { 1159 return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); 1160 } 1161 1162 size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const { 1163 return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio()); 1164 } 1165 1166 double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const { 1167 if (collector_state()->gcs_are_young()) { 1168 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1169 } else { 1170 return predict_mixed_rs_scan_time_ms(card_num); 1171 } 1172 } 1173 1174 double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const { 1175 if (_mixed_cost_per_entry_ms_seq->num() < 3) { 1176 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1177 } else { 1178 return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq); 1179 } 1180 } 1181 1182 double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { 1183 if (_cost_per_byte_ms_during_cm_seq->num() < 3) { 1184 return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq); 1185 } else { 1186 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq); 1187 } 1188 } 1189 1190 double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const { 1191 if (collector_state()->during_concurrent_mark()) { 1192 return predict_object_copy_time_ms_during_cm(bytes_to_copy); 1193 } else { 1194 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); 1195 } 1196 } 1197 1198 double G1CollectorPolicy::predict_constant_other_time_ms() const { 1199 return get_new_prediction(_constant_other_time_ms_seq); 1200 } 1201 1202 double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const { 1203 return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); 1204 } 1205 1206 double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const { 1207 return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); 1208 } 1209 1210 double G1CollectorPolicy::predict_remark_time_ms() const { 1211 return get_new_prediction(_concurrent_mark_remark_times_ms); 1212 } 1213 1214 double G1CollectorPolicy::predict_cleanup_time_ms() const { 1215 return get_new_prediction(_concurrent_mark_cleanup_times_ms); 1216 } 1217 1218 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { 1219 TruncatedSeq* seq = surv_rate_group->get_seq(age); 1220 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age); 1221 double pred = get_new_prediction(seq); 1222 if (pred > 1.0) { 1223 pred = 1.0; 1224 } 1225 return pred; 1226 } 1227 1228 double G1CollectorPolicy::predict_yg_surv_rate(int age) const { 1229 return predict_yg_surv_rate(age, _short_lived_surv_rate_group); 1230 } 1231 1232 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const { 1233 return _short_lived_surv_rate_group->accum_surv_rate_pred(age); 1234 } 1235 1236 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1237 size_t scanned_cards) const { 1238 return 1239 predict_rs_update_time_ms(pending_cards) + 1240 predict_rs_scan_time_ms(scanned_cards) + 1241 predict_constant_other_time_ms(); 1242 } 1243 1244 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const { 1245 size_t rs_length = predict_rs_lengths() + predict_rs_length_diff(); 1246 size_t card_num; 1247 if (collector_state()->gcs_are_young()) { 1248 card_num = predict_young_card_num(rs_length); 1249 } else { 1250 card_num = predict_non_young_card_num(rs_length); 1251 } 1252 return predict_base_elapsed_time_ms(pending_cards, card_num); 1253 } 1254 1255 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const { 1256 size_t bytes_to_copy; 1257 if (hr->is_marked()) 1258 bytes_to_copy = hr->max_live_bytes(); 1259 else { 1260 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); 1261 int age = hr->age_in_surv_rate_group(); 1262 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1263 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); 1264 } 1265 return bytes_to_copy; 1266 } 1267 1268 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1269 bool for_young_gc) const { 1270 size_t rs_length = hr->rem_set()->occupied(); 1271 size_t card_num; 1272 1273 // Predicting the number of cards is based on which type of GC 1274 // we're predicting for. 1275 if (for_young_gc) { 1276 card_num = predict_young_card_num(rs_length); 1277 } else { 1278 card_num = predict_non_young_card_num(rs_length); 1279 } 1280 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1281 1282 double region_elapsed_time_ms = 1283 predict_rs_scan_time_ms(card_num) + 1284 predict_object_copy_time_ms(bytes_to_copy); 1285 1286 // The prediction of the "other" time for this region is based 1287 // upon the region type and NOT the GC type. 1288 if (hr->is_young()) { 1289 region_elapsed_time_ms += predict_young_other_time_ms(1); 1290 } else { 1291 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1292 } 1293 return region_elapsed_time_ms; 1294 } 1295 1296 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1297 double elapsed_ms) { 1298 _recent_gc_times_ms->add(elapsed_ms); 1299 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1300 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1301 } 1302 1303 void G1CollectorPolicy::clear_ratio_check_data() { 1304 _ratio_over_threshold_count = 0; 1305 _ratio_over_threshold_sum = 0.0; 1306 _pauses_since_start = 0; 1307 } 1308 1309 size_t G1CollectorPolicy::expansion_amount() { 1310 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; 1311 double last_gc_overhead = _last_pause_time_ratio * 100.0; 1312 double threshold = _gc_overhead_perc; 1313 size_t expand_bytes = 0; 1314 1315 // If the heap is at less than half its maximum size, scale the threshold down, 1316 // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand, 1317 // though the scaling code will likely keep the increase small. 1318 if (_g1->capacity() <= _g1->max_capacity() / 2) { 1319 threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); 1320 threshold = MAX2(threshold, 1.0); 1321 } 1322 1323 // If the last GC time ratio is over the threshold, increment the count of 1324 // times it has been exceeded, and add this ratio to the sum of exceeded 1325 // ratios. 1326 if (last_gc_overhead > threshold) { 1327 _ratio_over_threshold_count++; 1328 _ratio_over_threshold_sum += last_gc_overhead; 1329 } 1330 1331 // Check if we've had enough GC time ratio checks that were over the 1332 // threshold to trigger an expansion. We'll also expand if we've 1333 // reached the end of the history buffer and the average of all entries 1334 // is still over the threshold. This indicates a smaller number of GCs were 1335 // long enough to make the average exceed the threshold. 1336 bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics; 1337 if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || 1338 (filled_history_buffer && (recent_gc_overhead > threshold))) { 1339 size_t min_expand_bytes = HeapRegion::GrainBytes; 1340 size_t reserved_bytes = _g1->max_capacity(); 1341 size_t committed_bytes = _g1->capacity(); 1342 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1343 size_t expand_bytes_via_pct = 1344 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1345 double scale_factor = 1.0; 1346 1347 // If the current size is less than 1/4 of the Initial heap size, expand 1348 // by half of the delta between the current and Initial sizes. IE, grow 1349 // back quickly. 1350 // 1351 // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of 1352 // the available expansion space, whichever is smaller, as the base 1353 // expansion size. Then possibly scale this size according to how much the 1354 // threshold has (on average) been exceeded by. If the delta is small 1355 // (less than the StartScaleDownAt value), scale the size down linearly, but 1356 // not by less than MinScaleDownFactor. If the delta is large (greater than 1357 // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor 1358 // times the base size. The scaling will be linear in the range from 1359 // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, 1360 // ScaleUpRange sets the rate of scaling up. 1361 if (committed_bytes < InitialHeapSize / 4) { 1362 expand_bytes = (InitialHeapSize - committed_bytes) / 2; 1363 } else { 1364 double const MinScaleDownFactor = 0.2; 1365 double const MaxScaleUpFactor = 2; 1366 double const StartScaleDownAt = _gc_overhead_perc; 1367 double const StartScaleUpAt = _gc_overhead_perc * 1.5; 1368 double const ScaleUpRange = _gc_overhead_perc * 2.0; 1369 1370 double ratio_delta; 1371 if (filled_history_buffer) { 1372 ratio_delta = recent_gc_overhead - threshold; 1373 } else { 1374 ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; 1375 } 1376 1377 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1378 if (ratio_delta < StartScaleDownAt) { 1379 scale_factor = ratio_delta / StartScaleDownAt; 1380 scale_factor = MAX2(scale_factor, MinScaleDownFactor); 1381 } else if (ratio_delta > StartScaleUpAt) { 1382 scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); 1383 scale_factor = MIN2(scale_factor, MaxScaleUpFactor); 1384 } 1385 } 1386 1387 log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " 1388 "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", 1389 recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); 1390 1391 expand_bytes = static_cast<size_t>(expand_bytes * scale_factor); 1392 1393 // Ensure the expansion size is at least the minimum growth amount 1394 // and at most the remaining uncommitted byte size. 1395 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1396 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1397 1398 clear_ratio_check_data(); 1399 } else { 1400 // An expansion was not triggered. If we've started counting, increment 1401 // the number of checks we've made in the current window. If we've 1402 // reached the end of the window without resizing, clear the counters to 1403 // start again the next time we see a ratio above the threshold. 1404 if (_ratio_over_threshold_count > 0) { 1405 _pauses_since_start++; 1406 if (_pauses_since_start > NumPrevPausesForHeuristics) { 1407 clear_ratio_check_data(); 1408 } 1409 } 1410 } 1411 1412 return expand_bytes; 1413 } 1414 1415 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1416 #ifndef PRODUCT 1417 _short_lived_surv_rate_group->print_surv_rate_summary(); 1418 // add this call for any other surv rate groups 1419 #endif // PRODUCT 1420 } 1421 1422 bool G1CollectorPolicy::is_young_list_full() const { 1423 uint young_list_length = _g1->young_list()->length(); 1424 uint young_list_target_length = _young_list_target_length; 1425 return young_list_length >= young_list_target_length; 1426 } 1427 1428 bool G1CollectorPolicy::can_expand_young_list() const { 1429 uint young_list_length = _g1->young_list()->length(); 1430 uint young_list_max_length = _young_list_max_length; 1431 return young_list_length < young_list_max_length; 1432 } 1433 1434 bool G1CollectorPolicy::adaptive_young_list_length() const { 1435 return _young_gen_sizer->adaptive_young_list_length(); 1436 } 1437 1438 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1439 uint expansion_region_num = 0; 1440 if (GCLockerEdenExpansionPercent > 0) { 1441 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1442 double expansion_region_num_d = perc * (double) _young_list_target_length; 1443 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1444 // less than 1.0) we'll get 1. 1445 expansion_region_num = (uint) ceil(expansion_region_num_d); 1446 } else { 1447 assert(expansion_region_num == 0, "sanity"); 1448 } 1449 _young_list_max_length = _young_list_target_length + expansion_region_num; 1450 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1451 } 1452 1453 // Calculates survivor space parameters. 1454 void G1CollectorPolicy::update_survivors_policy() { 1455 double max_survivor_regions_d = 1456 (double) _young_list_target_length / (double) SurvivorRatio; 1457 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1458 // smaller than 1.0) we'll get 1. 1459 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1460 1461 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1462 HeapRegion::GrainWords * _max_survivor_regions, counters()); 1463 } 1464 1465 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { 1466 // We actually check whether we are marking here and not if we are in a 1467 // reclamation phase. This means that we will schedule a concurrent mark 1468 // even while we are still in the process of reclaiming memory. 1469 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1470 if (!during_cycle) { 1471 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause)); 1472 collector_state()->set_initiate_conc_mark_if_possible(true); 1473 return true; 1474 } else { 1475 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause)); 1476 return false; 1477 } 1478 } 1479 1480 void G1CollectorPolicy::initiate_conc_mark() { 1481 collector_state()->set_during_initial_mark_pause(true); 1482 collector_state()->set_initiate_conc_mark_if_possible(false); 1483 } 1484 1485 void G1CollectorPolicy::decide_on_conc_mark_initiation() { 1486 // We are about to decide on whether this pause will be an 1487 // initial-mark pause. 1488 1489 // First, collector_state()->during_initial_mark_pause() should not be already set. We 1490 // will set it here if we have to. However, it should be cleared by 1491 // the end of the pause (it's only set for the duration of an 1492 // initial-mark pause). 1493 assert(!collector_state()->during_initial_mark_pause(), "pre-condition"); 1494 1495 if (collector_state()->initiate_conc_mark_if_possible()) { 1496 // We had noticed on a previous pause that the heap occupancy has 1497 // gone over the initiating threshold and we should start a 1498 // concurrent marking cycle. So we might initiate one. 1499 1500 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) { 1501 // Initiate a new initial mark if there is no marking or reclamation going on. 1502 initiate_conc_mark(); 1503 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); 1504 } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) { 1505 // Initiate a user requested initial mark. An initial mark must be young only 1506 // GC, so the collector state must be updated to reflect this. 1507 collector_state()->set_gcs_are_young(true); 1508 collector_state()->set_last_young_gc(false); 1509 1510 abort_time_to_mixed_tracking(); 1511 initiate_conc_mark(); 1512 log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)"); 1513 } else { 1514 // The concurrent marking thread is still finishing up the 1515 // previous cycle. If we start one right now the two cycles 1516 // overlap. In particular, the concurrent marking thread might 1517 // be in the process of clearing the next marking bitmap (which 1518 // we will use for the next cycle if we start one). Starting a 1519 // cycle now will be bad given that parts of the marking 1520 // information might get cleared by the marking thread. And we 1521 // cannot wait for the marking thread to finish the cycle as it 1522 // periodically yields while clearing the next marking bitmap 1523 // and, if it's in a yield point, it's waiting for us to 1524 // finish. So, at this point we will not start a cycle and we'll 1525 // let the concurrent marking thread complete the last one. 1526 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)"); 1527 } 1528 } 1529 } 1530 1531 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1532 G1CollectedHeap* _g1h; 1533 CSetChooserParUpdater _cset_updater; 1534 1535 public: 1536 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1537 uint chunk_size) : 1538 _g1h(G1CollectedHeap::heap()), 1539 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1540 1541 bool doHeapRegion(HeapRegion* r) { 1542 // Do we have any marking information for this region? 1543 if (r->is_marked()) { 1544 // We will skip any region that's currently used as an old GC 1545 // alloc region (we should not consider those for collection 1546 // before we fill them up). 1547 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1548 _cset_updater.add_region(r); 1549 } 1550 } 1551 return false; 1552 } 1553 }; 1554 1555 class ParKnownGarbageTask: public AbstractGangTask { 1556 CollectionSetChooser* _hrSorted; 1557 uint _chunk_size; 1558 G1CollectedHeap* _g1; 1559 HeapRegionClaimer _hrclaimer; 1560 1561 public: 1562 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : 1563 AbstractGangTask("ParKnownGarbageTask"), 1564 _hrSorted(hrSorted), _chunk_size(chunk_size), 1565 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} 1566 1567 void work(uint worker_id) { 1568 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1569 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); 1570 } 1571 }; 1572 1573 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { 1574 assert(n_workers > 0, "Active gc workers should be greater than 0"); 1575 const uint overpartition_factor = 4; 1576 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); 1577 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); 1578 } 1579 1580 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { 1581 cset_chooser()->clear(); 1582 1583 WorkGang* workers = _g1->workers(); 1584 uint n_workers = workers->active_workers(); 1585 1586 uint n_regions = _g1->num_regions(); 1587 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); 1588 cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); 1589 ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers); 1590 workers->run_task(&par_known_garbage_task); 1591 1592 cset_chooser()->sort_regions(); 1593 1594 double end_sec = os::elapsedTime(); 1595 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1596 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1597 _prev_collection_pause_end_ms += elapsed_time_ms; 1598 1599 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); 1600 } 1601 1602 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const { 1603 // Returns the given amount of reclaimable bytes (that represents 1604 // the amount of reclaimable space still to be collected) as a 1605 // percentage of the current heap capacity. 1606 size_t capacity_bytes = _g1->capacity(); 1607 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 1608 } 1609 1610 void G1CollectorPolicy::maybe_start_marking() { 1611 if (need_to_start_conc_mark("end of GC")) { 1612 // Note: this might have already been set, if during the last 1613 // pause we decided to start a cycle but at the beginning of 1614 // this pause we decided to postpone it. That's OK. 1615 collector_state()->set_initiate_conc_mark_if_possible(true); 1616 } 1617 } 1618 1619 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const { 1620 assert(!collector_state()->full_collection(), "must be"); 1621 if (collector_state()->during_initial_mark_pause()) { 1622 assert(collector_state()->last_gc_was_young(), "must be"); 1623 assert(!collector_state()->last_young_gc(), "must be"); 1624 return InitialMarkGC; 1625 } else if (collector_state()->last_young_gc()) { 1626 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1627 assert(collector_state()->last_gc_was_young(), "must be"); 1628 return LastYoungGC; 1629 } else if (!collector_state()->last_gc_was_young()) { 1630 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1631 assert(!collector_state()->last_young_gc(), "must be"); 1632 return MixedGC; 1633 } else { 1634 assert(collector_state()->last_gc_was_young(), "must be"); 1635 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1636 assert(!collector_state()->last_young_gc(), "must be"); 1637 return YoungOnlyGC; 1638 } 1639 } 1640 1641 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) { 1642 // Manage the MMU tracker. For some reason it ignores Full GCs. 1643 if (kind != FullGC) { 1644 _mmu_tracker->add_pause(start, end); 1645 } 1646 // Manage the mutator time tracking from initial mark to first mixed gc. 1647 switch (kind) { 1648 case FullGC: 1649 abort_time_to_mixed_tracking(); 1650 break; 1651 case Cleanup: 1652 case Remark: 1653 case YoungOnlyGC: 1654 case LastYoungGC: 1655 _initial_mark_to_mixed.add_pause(end - start); 1656 break; 1657 case InitialMarkGC: 1658 _initial_mark_to_mixed.record_initial_mark_end(end); 1659 break; 1660 case MixedGC: 1661 _initial_mark_to_mixed.record_mixed_gc_start(start); 1662 break; 1663 default: 1664 ShouldNotReachHere(); 1665 } 1666 } 1667 1668 void G1CollectorPolicy::abort_time_to_mixed_tracking() { 1669 _initial_mark_to_mixed.reset(); 1670 } 1671 1672 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 1673 const char* false_action_str) const { 1674 if (cset_chooser()->is_empty()) { 1675 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); 1676 return false; 1677 } 1678 1679 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 1680 size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); 1681 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 1682 double threshold = (double) G1HeapWastePercent; 1683 if (reclaimable_perc <= threshold) { 1684 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1685 false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 1686 return false; 1687 } 1688 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1689 true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 1690 return true; 1691 } 1692 1693 uint G1CollectorPolicy::calc_min_old_cset_length() const { 1694 // The min old CSet region bound is based on the maximum desired 1695 // number of mixed GCs after a cycle. I.e., even if some old regions 1696 // look expensive, we should add them to the CSet anyway to make 1697 // sure we go through the available old regions in no more than the 1698 // maximum desired number of mixed GCs. 1699 // 1700 // The calculation is based on the number of marked regions we added 1701 // to the CSet chooser in the first place, not how many remain, so 1702 // that the result is the same during all mixed GCs that follow a cycle. 1703 1704 const size_t region_num = (size_t) cset_chooser()->length(); 1705 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 1706 size_t result = region_num / gc_num; 1707 // emulate ceiling 1708 if (result * gc_num < region_num) { 1709 result += 1; 1710 } 1711 return (uint) result; 1712 } 1713 1714 uint G1CollectorPolicy::calc_max_old_cset_length() const { 1715 // The max old CSet region bound is based on the threshold expressed 1716 // as a percentage of the heap size. I.e., it should bound the 1717 // number of old regions added to the CSet irrespective of how many 1718 // of them are available. 1719 1720 const G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1721 const size_t region_num = g1h->num_regions(); 1722 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 1723 size_t result = region_num * perc / 100; 1724 // emulate ceiling 1725 if (100 * result < region_num * perc) { 1726 result += 1; 1727 } 1728 return (uint) result; 1729 } 1730 1731 void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) { 1732 double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms); 1733 _collection_set->finalize_old_part(time_remaining_ms); 1734 } 1735