1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/concurrentG1Refine.hpp" 27 #include "gc/g1/concurrentMarkThread.inline.hpp" 28 #include "gc/g1/g1CollectedHeap.inline.hpp" 29 #include "gc/g1/g1CollectionSet.hpp" 30 #include "gc/g1/g1CollectorPolicy.hpp" 31 #include "gc/g1/g1ConcurrentMark.hpp" 32 #include "gc/g1/g1IHOPControl.hpp" 33 #include "gc/g1/g1GCPhaseTimes.hpp" 34 #include "gc/g1/g1YoungGenSizer.hpp" 35 #include "gc/g1/heapRegion.inline.hpp" 36 #include "gc/g1/heapRegionRemSet.hpp" 37 #include "gc/shared/gcPolicyCounters.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/mutexLocker.hpp" 41 #include "utilities/debug.hpp" 42 #include "utilities/pair.hpp" 43 44 // Different defaults for different number of GC threads 45 // They were chosen by running GCOld and SPECjbb on debris with different 46 // numbers of GC threads and choosing them based on the results 47 48 // all the same 49 static double rs_length_diff_defaults[] = { 50 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 51 }; 52 53 static double cost_per_card_ms_defaults[] = { 54 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 55 }; 56 57 // all the same 58 static double young_cards_per_entry_ratio_defaults[] = { 59 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 60 }; 61 62 static double cost_per_entry_ms_defaults[] = { 63 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 64 }; 65 66 static double cost_per_byte_ms_defaults[] = { 67 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 68 }; 69 70 // these should be pretty consistent 71 static double constant_other_time_ms_defaults[] = { 72 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 73 }; 74 75 76 static double young_other_cost_per_region_ms_defaults[] = { 77 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 78 }; 79 80 static double non_young_other_cost_per_region_ms_defaults[] = { 81 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 82 }; 83 84 G1CollectorPolicy::G1CollectorPolicy() : 85 _predictor(G1ConfidencePercent / 100.0), 86 87 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 88 89 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 90 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 91 92 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 93 _prev_collection_pause_end_ms(0.0), 94 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 95 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 96 _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)), 97 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 98 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 99 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 100 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 101 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), 103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 105 _non_young_other_cost_per_region_ms_seq( 106 new TruncatedSeq(TruncatedSeqLength)), 107 108 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 109 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), 110 111 _pause_time_target_ms((double) MaxGCPauseMillis), 112 113 _recent_prev_end_times_for_all_gcs_sec( 114 new TruncatedSeq(NumPrevPausesForHeuristics)), 115 116 _recent_avg_pause_time_ratio(0.0), 117 _rs_lengths_prediction(0), 118 _max_survivor_regions(0), 119 120 // add here any more surv rate groups 121 _survivors_age_table(true), 122 123 _gc_overhead_perc(0.0), 124 125 _bytes_allocated_in_old_since_last_gc(0), 126 _ihop_control(NULL), 127 _initial_mark_to_mixed() { 128 129 // SurvRateGroups below must be initialized after the predictor because they 130 // indirectly use it through this object passed to their constructor. 131 _short_lived_surv_rate_group = 132 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary); 133 _survivor_surv_rate_group = 134 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary); 135 136 // Set up the region size and associated fields. Given that the 137 // policy is created before the heap, we have to set this up here, 138 // so it's done as soon as possible. 139 140 // It would have been natural to pass initial_heap_byte_size() and 141 // max_heap_byte_size() to setup_heap_region_size() but those have 142 // not been set up at this point since they should be aligned with 143 // the region size. So, there is a circular dependency here. We base 144 // the region size on the heap size, but the heap size should be 145 // aligned with the region size. To get around this we use the 146 // unaligned values for the heap. 147 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); 148 HeapRegionRemSet::setup_remset_size(); 149 150 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 151 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 152 clear_ratio_check_data(); 153 154 _phase_times = new G1GCPhaseTimes(ParallelGCThreads); 155 156 int index = MIN2(ParallelGCThreads - 1, 7u); 157 158 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 159 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 160 _cost_scan_hcc_seq->add(0.0); 161 _young_cards_per_entry_ratio_seq->add( 162 young_cards_per_entry_ratio_defaults[index]); 163 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 164 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 165 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 166 _young_other_cost_per_region_ms_seq->add( 167 young_other_cost_per_region_ms_defaults[index]); 168 _non_young_other_cost_per_region_ms_seq->add( 169 non_young_other_cost_per_region_ms_defaults[index]); 170 171 // Below, we might need to calculate the pause time target based on 172 // the pause interval. When we do so we are going to give G1 maximum 173 // flexibility and allow it to do pauses when it needs to. So, we'll 174 // arrange that the pause interval to be pause time target + 1 to 175 // ensure that a) the pause time target is maximized with respect to 176 // the pause interval and b) we maintain the invariant that pause 177 // time target < pause interval. If the user does not want this 178 // maximum flexibility, they will have to set the pause interval 179 // explicitly. 180 181 // First make sure that, if either parameter is set, its value is 182 // reasonable. 183 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 184 if (MaxGCPauseMillis < 1) { 185 vm_exit_during_initialization("MaxGCPauseMillis should be " 186 "greater than 0"); 187 } 188 } 189 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 190 if (GCPauseIntervalMillis < 1) { 191 vm_exit_during_initialization("GCPauseIntervalMillis should be " 192 "greater than 0"); 193 } 194 } 195 196 // Then, if the pause time target parameter was not set, set it to 197 // the default value. 198 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 199 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 200 // The default pause time target in G1 is 200ms 201 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 202 } else { 203 // We do not allow the pause interval to be set without the 204 // pause time target 205 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " 206 "without setting MaxGCPauseMillis"); 207 } 208 } 209 210 // Then, if the interval parameter was not set, set it according to 211 // the pause time target (this will also deal with the case when the 212 // pause time target is the default value). 213 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 214 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); 215 } 216 217 // Finally, make sure that the two parameters are consistent. 218 if (MaxGCPauseMillis >= GCPauseIntervalMillis) { 219 char buffer[256]; 220 jio_snprintf(buffer, 256, 221 "MaxGCPauseMillis (%u) should be less than " 222 "GCPauseIntervalMillis (%u)", 223 MaxGCPauseMillis, GCPauseIntervalMillis); 224 vm_exit_during_initialization(buffer); 225 } 226 227 double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 228 double time_slice = (double) GCPauseIntervalMillis / 1000.0; 229 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); 230 231 // start conservatively (around 50ms is about right) 232 _concurrent_mark_remark_times_ms->add(0.05); 233 _concurrent_mark_cleanup_times_ms->add(0.20); 234 _tenuring_threshold = MaxTenuringThreshold; 235 236 assert(GCTimeRatio > 0, 237 "we should have set it to a default value set_g1_gc_flags() " 238 "if a user set it to 0"); 239 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); 240 241 uintx reserve_perc = G1ReservePercent; 242 // Put an artificial ceiling on this so that it's not set to a silly value. 243 if (reserve_perc > 50) { 244 reserve_perc = 50; 245 warning("G1ReservePercent is set to a value that is too large, " 246 "it's been updated to " UINTX_FORMAT, reserve_perc); 247 } 248 _reserve_factor = (double) reserve_perc / 100.0; 249 // This will be set when the heap is expanded 250 // for the first time during initialization. 251 _reserve_regions = 0; 252 253 _ihop_control = create_ihop_control(); 254 } 255 256 G1CollectorPolicy::~G1CollectorPolicy() { 257 delete _ihop_control; 258 } 259 260 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const { 261 return _predictor.get_new_prediction(seq); 262 } 263 264 size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const { 265 return (size_t)get_new_prediction(seq); 266 } 267 268 void G1CollectorPolicy::initialize_alignments() { 269 _space_alignment = HeapRegion::GrainBytes; 270 size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint(); 271 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); 272 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); 273 } 274 275 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); } 276 277 void G1CollectorPolicy::post_heap_initialize() { 278 uintx max_regions = G1CollectedHeap::heap()->max_regions(); 279 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; 280 if (max_young_size != MaxNewSize) { 281 FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size); 282 } 283 } 284 285 void G1CollectorPolicy::initialize_flags() { 286 if (G1HeapRegionSize != HeapRegion::GrainBytes) { 287 FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes); 288 } 289 290 if (SurvivorRatio < 1) { 291 vm_exit_during_initialization("Invalid survivor ratio specified"); 292 } 293 CollectorPolicy::initialize_flags(); 294 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags 295 } 296 297 298 void G1CollectorPolicy::init() { 299 // Set aside an initial future to_space. 300 _g1 = G1CollectedHeap::heap(); 301 _collection_set = _g1->collection_set(); 302 _collection_set->set_policy(this); 303 304 assert(Heap_lock->owned_by_self(), "Locking discipline."); 305 306 initialize_gc_policy_counters(); 307 308 if (adaptive_young_list_length()) { 309 _young_list_fixed_length = 0; 310 } else { 311 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); 312 } 313 _free_regions_at_end_of_collection = _g1->num_free_regions(); 314 315 update_young_list_max_and_target_length(); 316 // We may immediately start allocating regions and placing them on the 317 // collection set list. Initialize the per-collection set info 318 _collection_set->start_incremental_building(); 319 } 320 321 void G1CollectorPolicy::note_gc_start(uint num_active_workers) { 322 phase_times()->note_gc_start(num_active_workers); 323 } 324 325 // Create the jstat counters for the policy. 326 void G1CollectorPolicy::initialize_gc_policy_counters() { 327 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); 328 } 329 330 bool G1CollectorPolicy::predict_will_fit(uint young_length, 331 double base_time_ms, 332 uint base_free_regions, 333 double target_pause_time_ms) const { 334 if (young_length >= base_free_regions) { 335 // end condition 1: not enough space for the young regions 336 return false; 337 } 338 339 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); 340 size_t bytes_to_copy = 341 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); 342 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); 343 double young_other_time_ms = predict_young_other_time_ms(young_length); 344 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; 345 if (pause_time_ms > target_pause_time_ms) { 346 // end condition 2: prediction is over the target pause time 347 return false; 348 } 349 350 size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes; 351 352 // When copying, we will likely need more bytes free than is live in the region. 353 // Add some safety margin to factor in the confidence of our guess, and the 354 // natural expected waste. 355 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty 356 // of the calculation: the lower the confidence, the more headroom. 357 // (100 + TargetPLABWastePct) represents the increase in expected bytes during 358 // copying due to anticipated waste in the PLABs. 359 double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0; 360 size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy); 361 362 if (expected_bytes_to_copy > free_bytes) { 363 // end condition 3: out-of-space 364 return false; 365 } 366 367 // success! 368 return true; 369 } 370 371 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { 372 // re-calculate the necessary reserve 373 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; 374 // We use ceiling so that if reserve_regions_d is > 0.0 (but 375 // smaller than 1.0) we'll get 1. 376 _reserve_regions = (uint) ceil(reserve_regions_d); 377 378 _young_gen_sizer->heap_size_changed(new_number_of_regions); 379 380 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes); 381 } 382 383 uint G1CollectorPolicy::calculate_young_list_desired_min_length( 384 uint base_min_length) const { 385 uint desired_min_length = 0; 386 if (adaptive_young_list_length()) { 387 if (_alloc_rate_ms_seq->num() > 3) { 388 double now_sec = os::elapsedTime(); 389 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 390 double alloc_rate_ms = predict_alloc_rate_ms(); 391 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); 392 } else { 393 // otherwise we don't have enough info to make the prediction 394 } 395 } 396 desired_min_length += base_min_length; 397 // make sure we don't go below any user-defined minimum bound 398 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); 399 } 400 401 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const { 402 // Here, we might want to also take into account any additional 403 // constraints (i.e., user-defined minimum bound). Currently, we 404 // effectively don't set this bound. 405 return _young_gen_sizer->max_desired_young_length(); 406 } 407 408 uint G1CollectorPolicy::update_young_list_max_and_target_length() { 409 return update_young_list_max_and_target_length(predict_rs_lengths()); 410 } 411 412 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) { 413 uint unbounded_target_length = update_young_list_target_length(rs_lengths); 414 update_max_gc_locker_expansion(); 415 return unbounded_target_length; 416 } 417 418 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { 419 YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths); 420 _young_list_target_length = young_lengths.first; 421 return young_lengths.second; 422 } 423 424 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const { 425 YoungTargetLengths result; 426 427 // Calculate the absolute and desired min bounds first. 428 429 // This is how many young regions we already have (currently: the survivors). 430 const uint base_min_length = _g1->young_list()->survivor_length(); 431 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); 432 // This is the absolute minimum young length. Ensure that we 433 // will at least have one eden region available for allocation. 434 uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1); 435 // If we shrank the young list target it should not shrink below the current size. 436 desired_min_length = MAX2(desired_min_length, absolute_min_length); 437 // Calculate the absolute and desired max bounds. 438 439 uint desired_max_length = calculate_young_list_desired_max_length(); 440 441 uint young_list_target_length = 0; 442 if (adaptive_young_list_length()) { 443 if (collector_state()->gcs_are_young()) { 444 young_list_target_length = 445 calculate_young_list_target_length(rs_lengths, 446 base_min_length, 447 desired_min_length, 448 desired_max_length); 449 } else { 450 // Don't calculate anything and let the code below bound it to 451 // the desired_min_length, i.e., do the next GC as soon as 452 // possible to maximize how many old regions we can add to it. 453 } 454 } else { 455 // The user asked for a fixed young gen so we'll fix the young gen 456 // whether the next GC is young or mixed. 457 young_list_target_length = _young_list_fixed_length; 458 } 459 460 result.second = young_list_target_length; 461 462 // We will try our best not to "eat" into the reserve. 463 uint absolute_max_length = 0; 464 if (_free_regions_at_end_of_collection > _reserve_regions) { 465 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; 466 } 467 if (desired_max_length > absolute_max_length) { 468 desired_max_length = absolute_max_length; 469 } 470 471 // Make sure we don't go over the desired max length, nor under the 472 // desired min length. In case they clash, desired_min_length wins 473 // which is why that test is second. 474 if (young_list_target_length > desired_max_length) { 475 young_list_target_length = desired_max_length; 476 } 477 if (young_list_target_length < desired_min_length) { 478 young_list_target_length = desired_min_length; 479 } 480 481 assert(young_list_target_length > base_min_length, 482 "we should be able to allocate at least one eden region"); 483 assert(young_list_target_length >= absolute_min_length, "post-condition"); 484 485 result.first = young_list_target_length; 486 return result; 487 } 488 489 uint 490 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, 491 uint base_min_length, 492 uint desired_min_length, 493 uint desired_max_length) const { 494 assert(adaptive_young_list_length(), "pre-condition"); 495 assert(collector_state()->gcs_are_young(), "only call this for young GCs"); 496 497 // In case some edge-condition makes the desired max length too small... 498 if (desired_max_length <= desired_min_length) { 499 return desired_min_length; 500 } 501 502 // We'll adjust min_young_length and max_young_length not to include 503 // the already allocated young regions (i.e., so they reflect the 504 // min and max eden regions we'll allocate). The base_min_length 505 // will be reflected in the predictions by the 506 // survivor_regions_evac_time prediction. 507 assert(desired_min_length > base_min_length, "invariant"); 508 uint min_young_length = desired_min_length - base_min_length; 509 assert(desired_max_length > base_min_length, "invariant"); 510 uint max_young_length = desired_max_length - base_min_length; 511 512 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 513 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); 514 size_t pending_cards = get_new_size_prediction(_pending_cards_seq); 515 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); 516 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); 517 double base_time_ms = 518 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + 519 survivor_regions_evac_time; 520 uint available_free_regions = _free_regions_at_end_of_collection; 521 uint base_free_regions = 0; 522 if (available_free_regions > _reserve_regions) { 523 base_free_regions = available_free_regions - _reserve_regions; 524 } 525 526 // Here, we will make sure that the shortest young length that 527 // makes sense fits within the target pause time. 528 529 if (predict_will_fit(min_young_length, base_time_ms, 530 base_free_regions, target_pause_time_ms)) { 531 // The shortest young length will fit into the target pause time; 532 // we'll now check whether the absolute maximum number of young 533 // regions will fit in the target pause time. If not, we'll do 534 // a binary search between min_young_length and max_young_length. 535 if (predict_will_fit(max_young_length, base_time_ms, 536 base_free_regions, target_pause_time_ms)) { 537 // The maximum young length will fit into the target pause time. 538 // We are done so set min young length to the maximum length (as 539 // the result is assumed to be returned in min_young_length). 540 min_young_length = max_young_length; 541 } else { 542 // The maximum possible number of young regions will not fit within 543 // the target pause time so we'll search for the optimal 544 // length. The loop invariants are: 545 // 546 // min_young_length < max_young_length 547 // min_young_length is known to fit into the target pause time 548 // max_young_length is known not to fit into the target pause time 549 // 550 // Going into the loop we know the above hold as we've just 551 // checked them. Every time around the loop we check whether 552 // the middle value between min_young_length and 553 // max_young_length fits into the target pause time. If it 554 // does, it becomes the new min. If it doesn't, it becomes 555 // the new max. This way we maintain the loop invariants. 556 557 assert(min_young_length < max_young_length, "invariant"); 558 uint diff = (max_young_length - min_young_length) / 2; 559 while (diff > 0) { 560 uint young_length = min_young_length + diff; 561 if (predict_will_fit(young_length, base_time_ms, 562 base_free_regions, target_pause_time_ms)) { 563 min_young_length = young_length; 564 } else { 565 max_young_length = young_length; 566 } 567 assert(min_young_length < max_young_length, "invariant"); 568 diff = (max_young_length - min_young_length) / 2; 569 } 570 // The results is min_young_length which, according to the 571 // loop invariants, should fit within the target pause time. 572 573 // These are the post-conditions of the binary search above: 574 assert(min_young_length < max_young_length, 575 "otherwise we should have discovered that max_young_length " 576 "fits into the pause target and not done the binary search"); 577 assert(predict_will_fit(min_young_length, base_time_ms, 578 base_free_regions, target_pause_time_ms), 579 "min_young_length, the result of the binary search, should " 580 "fit into the pause target"); 581 assert(!predict_will_fit(min_young_length + 1, base_time_ms, 582 base_free_regions, target_pause_time_ms), 583 "min_young_length, the result of the binary search, should be " 584 "optimal, so no larger length should fit into the pause target"); 585 } 586 } else { 587 // Even the minimum length doesn't fit into the pause time 588 // target, return it as the result nevertheless. 589 } 590 return base_min_length + min_young_length; 591 } 592 593 double G1CollectorPolicy::predict_survivor_regions_evac_time() const { 594 double survivor_regions_evac_time = 0.0; 595 for (HeapRegion * r = _g1->young_list()->first_survivor_region(); 596 r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region(); 597 r = r->get_next_young_region()) { 598 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young()); 599 } 600 return survivor_regions_evac_time; 601 } 602 603 void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) { 604 guarantee( adaptive_young_list_length(), "should not call this otherwise" ); 605 606 if (rs_lengths > _rs_lengths_prediction) { 607 // add 10% to avoid having to recalculate often 608 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; 609 update_rs_lengths_prediction(rs_lengths_prediction); 610 611 update_young_list_max_and_target_length(rs_lengths_prediction); 612 } 613 } 614 615 void G1CollectorPolicy::update_rs_lengths_prediction() { 616 update_rs_lengths_prediction(predict_rs_lengths()); 617 } 618 619 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) { 620 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) { 621 _rs_lengths_prediction = prediction; 622 } 623 } 624 625 #ifndef PRODUCT 626 bool G1CollectorPolicy::verify_young_ages() { 627 HeapRegion* head = _g1->young_list()->first_region(); 628 return 629 verify_young_ages(head, _short_lived_surv_rate_group); 630 // also call verify_young_ages on any additional surv rate groups 631 } 632 633 bool 634 G1CollectorPolicy::verify_young_ages(HeapRegion* head, 635 SurvRateGroup *surv_rate_group) { 636 guarantee( surv_rate_group != NULL, "pre-condition" ); 637 638 const char* name = surv_rate_group->name(); 639 bool ret = true; 640 int prev_age = -1; 641 642 for (HeapRegion* curr = head; 643 curr != NULL; 644 curr = curr->get_next_young_region()) { 645 SurvRateGroup* group = curr->surv_rate_group(); 646 if (group == NULL && !curr->is_survivor()) { 647 log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name); 648 ret = false; 649 } 650 651 if (surv_rate_group == group) { 652 int age = curr->age_in_surv_rate_group(); 653 654 if (age < 0) { 655 log_error(gc, verify)("## %s: encountered negative age", name); 656 ret = false; 657 } 658 659 if (age <= prev_age) { 660 log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age); 661 ret = false; 662 } 663 prev_age = age; 664 } 665 } 666 667 return ret; 668 } 669 #endif // PRODUCT 670 671 void G1CollectorPolicy::record_full_collection_start() { 672 _full_collection_start_sec = os::elapsedTime(); 673 // Release the future to-space so that it is available for compaction into. 674 collector_state()->set_full_collection(true); 675 } 676 677 void G1CollectorPolicy::record_full_collection_end() { 678 // Consider this like a collection pause for the purposes of allocation 679 // since last pause. 680 double end_sec = os::elapsedTime(); 681 double full_gc_time_sec = end_sec - _full_collection_start_sec; 682 double full_gc_time_ms = full_gc_time_sec * 1000.0; 683 684 update_recent_gc_times(end_sec, full_gc_time_ms); 685 686 collector_state()->set_full_collection(false); 687 688 // "Nuke" the heuristics that control the young/mixed GC 689 // transitions and make sure we start with young GCs after the Full GC. 690 collector_state()->set_gcs_are_young(true); 691 collector_state()->set_last_young_gc(false); 692 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); 693 collector_state()->set_during_initial_mark_pause(false); 694 collector_state()->set_in_marking_window(false); 695 collector_state()->set_in_marking_window_im(false); 696 697 _short_lived_surv_rate_group->start_adding_regions(); 698 // also call this on any additional surv rate groups 699 700 _free_regions_at_end_of_collection = _g1->num_free_regions(); 701 // Reset survivors SurvRateGroup. 702 _survivor_surv_rate_group->reset(); 703 update_young_list_max_and_target_length(); 704 update_rs_lengths_prediction(); 705 cset_chooser()->clear(); 706 707 _bytes_allocated_in_old_since_last_gc = 0; 708 709 record_pause(FullGC, _full_collection_start_sec, end_sec); 710 } 711 712 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { 713 // We only need to do this here as the policy will only be applied 714 // to the GC we're about to start. so, no point is calculating this 715 // every time we calculate / recalculate the target young length. 716 update_survivors_policy(); 717 718 assert(_g1->used() == _g1->recalculate_used(), 719 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT, 720 _g1->used(), _g1->recalculate_used()); 721 722 phase_times()->record_cur_collection_start_sec(start_time_sec); 723 _pending_cards = _g1->pending_card_num(); 724 725 _collection_set->reset_bytes_used_before(); 726 _bytes_copied_during_gc = 0; 727 728 collector_state()->set_last_gc_was_young(false); 729 730 // do that for any other surv rate groups 731 _short_lived_surv_rate_group->stop_adding_regions(); 732 _survivors_age_table.clear(); 733 734 assert( verify_young_ages(), "region age verification" ); 735 } 736 737 void G1CollectorPolicy::record_concurrent_mark_init_end(double 738 mark_init_elapsed_time_ms) { 739 collector_state()->set_during_marking(true); 740 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); 741 collector_state()->set_during_initial_mark_pause(false); 742 } 743 744 void G1CollectorPolicy::record_concurrent_mark_remark_start() { 745 _mark_remark_start_sec = os::elapsedTime(); 746 collector_state()->set_during_marking(false); 747 } 748 749 void G1CollectorPolicy::record_concurrent_mark_remark_end() { 750 double end_time_sec = os::elapsedTime(); 751 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 752 _concurrent_mark_remark_times_ms->add(elapsed_time_ms); 753 _prev_collection_pause_end_ms += elapsed_time_ms; 754 755 record_pause(Remark, _mark_remark_start_sec, end_time_sec); 756 } 757 758 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { 759 _mark_cleanup_start_sec = os::elapsedTime(); 760 } 761 762 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { 763 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc", 764 "skip last young-only gc"); 765 collector_state()->set_last_young_gc(should_continue_with_reclaim); 766 // We skip the marking phase. 767 if (!should_continue_with_reclaim) { 768 abort_time_to_mixed_tracking(); 769 } 770 collector_state()->set_in_marking_window(false); 771 } 772 773 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { 774 return phase_times()->average_time_ms(phase); 775 } 776 777 double G1CollectorPolicy::young_other_time_ms() const { 778 return phase_times()->young_cset_choice_time_ms() + 779 phase_times()->young_free_cset_time_ms(); 780 } 781 782 double G1CollectorPolicy::non_young_other_time_ms() const { 783 return phase_times()->non_young_cset_choice_time_ms() + 784 phase_times()->non_young_free_cset_time_ms(); 785 786 } 787 788 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const { 789 return pause_time_ms - 790 average_time_ms(G1GCPhaseTimes::UpdateRS) - 791 average_time_ms(G1GCPhaseTimes::ScanRS) - 792 average_time_ms(G1GCPhaseTimes::ObjCopy) - 793 average_time_ms(G1GCPhaseTimes::Termination); 794 } 795 796 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const { 797 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms(); 798 } 799 800 CollectionSetChooser* G1CollectorPolicy::cset_chooser() const { 801 return _collection_set->cset_chooser(); 802 } 803 804 bool G1CollectorPolicy::about_to_start_mixed_phase() const { 805 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc(); 806 } 807 808 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { 809 if (about_to_start_mixed_phase()) { 810 return false; 811 } 812 813 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); 814 815 size_t cur_used_bytes = _g1->non_young_capacity_bytes(); 816 size_t alloc_byte_size = alloc_word_size * HeapWordSize; 817 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; 818 819 bool result = false; 820 if (marking_request_bytes > marking_initiating_used_threshold) { 821 result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc(); 822 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s", 823 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", 824 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); 825 } 826 827 return result; 828 } 829 830 // Anything below that is considered to be zero 831 #define MIN_TIMER_GRANULARITY 0.0000001 832 833 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { 834 double end_time_sec = os::elapsedTime(); 835 836 size_t cur_used_bytes = _g1->used(); 837 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 838 bool last_pause_included_initial_mark = false; 839 bool update_stats = !_g1->evacuation_failed(); 840 841 NOT_PRODUCT(_short_lived_surv_rate_group->print()); 842 843 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 844 845 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); 846 if (last_pause_included_initial_mark) { 847 record_concurrent_mark_init_end(0.0); 848 } else { 849 maybe_start_marking(); 850 } 851 852 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 853 if (app_time_ms < MIN_TIMER_GRANULARITY) { 854 // This usually happens due to the timer not having the required 855 // granularity. Some Linuxes are the usual culprits. 856 // We'll just set it to something (arbitrarily) small. 857 app_time_ms = 1.0; 858 } 859 860 if (update_stats) { 861 // We maintain the invariant that all objects allocated by mutator 862 // threads will be allocated out of eden regions. So, we can use 863 // the eden region number allocated since the previous GC to 864 // calculate the application's allocate rate. The only exception 865 // to that is humongous objects that are allocated separately. But 866 // given that humongous object allocations do not really affect 867 // either the pause's duration nor when the next pause will take 868 // place we can safely ignore them here. 869 uint regions_allocated = _collection_set->eden_region_length(); 870 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 871 _alloc_rate_ms_seq->add(alloc_rate_ms); 872 873 double interval_ms = 874 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 875 update_recent_gc_times(end_time_sec, pause_time_ms); 876 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 877 if (recent_avg_pause_time_ratio() < 0.0 || 878 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 879 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 880 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 881 if (_recent_avg_pause_time_ratio < 0.0) { 882 _recent_avg_pause_time_ratio = 0.0; 883 } else { 884 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 885 _recent_avg_pause_time_ratio = 1.0; 886 } 887 } 888 889 // Compute the ratio of just this last pause time to the entire time range stored 890 // in the vectors. Comparing this pause to the entire range, rather than only the 891 // most recent interval, has the effect of smoothing over a possible transient 'burst' 892 // of more frequent pauses that don't really reflect a change in heap occupancy. 893 // This reduces the likelihood of a needless heap expansion being triggered. 894 _last_pause_time_ratio = 895 (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; 896 } 897 898 bool new_in_marking_window = collector_state()->in_marking_window(); 899 bool new_in_marking_window_im = false; 900 if (last_pause_included_initial_mark) { 901 new_in_marking_window = true; 902 new_in_marking_window_im = true; 903 } 904 905 if (collector_state()->last_young_gc()) { 906 // This is supposed to to be the "last young GC" before we start 907 // doing mixed GCs. Here we decide whether to start mixed GCs or not. 908 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC"); 909 910 if (next_gc_should_be_mixed("start mixed GCs", 911 "do not start mixed GCs")) { 912 collector_state()->set_gcs_are_young(false); 913 } else { 914 // We aborted the mixed GC phase early. 915 abort_time_to_mixed_tracking(); 916 } 917 918 collector_state()->set_last_young_gc(false); 919 } 920 921 if (!collector_state()->last_gc_was_young()) { 922 // This is a mixed GC. Here we decide whether to continue doing 923 // mixed GCs or not. 924 if (!next_gc_should_be_mixed("continue mixed GCs", 925 "do not continue mixed GCs")) { 926 collector_state()->set_gcs_are_young(true); 927 928 maybe_start_marking(); 929 } 930 } 931 932 _short_lived_surv_rate_group->start_adding_regions(); 933 // Do that for any other surv rate groups 934 935 double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0; 936 937 if (update_stats) { 938 double cost_per_card_ms = 0.0; 939 if (_pending_cards > 0) { 940 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards; 941 _cost_per_card_ms_seq->add(cost_per_card_ms); 942 } 943 _cost_scan_hcc_seq->add(scan_hcc_time_ms); 944 945 double cost_per_entry_ms = 0.0; 946 if (cards_scanned > 10) { 947 cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; 948 if (collector_state()->last_gc_was_young()) { 949 _cost_per_entry_ms_seq->add(cost_per_entry_ms); 950 } else { 951 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); 952 } 953 } 954 955 if (_max_rs_lengths > 0) { 956 double cards_per_entry_ratio = 957 (double) cards_scanned / (double) _max_rs_lengths; 958 if (collector_state()->last_gc_was_young()) { 959 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 960 } else { 961 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 962 } 963 } 964 965 // This is defensive. For a while _max_rs_lengths could get 966 // smaller than _recorded_rs_lengths which was causing 967 // rs_length_diff to get very large and mess up the RSet length 968 // predictions. The reason was unsafe concurrent updates to the 969 // _inc_cset_recorded_rs_lengths field which the code below guards 970 // against (see CR 7118202). This bug has now been fixed (see CR 971 // 7119027). However, I'm still worried that 972 // _inc_cset_recorded_rs_lengths might still end up somewhat 973 // inaccurate. The concurrent refinement thread calculates an 974 // RSet's length concurrently with other CR threads updating it 975 // which might cause it to calculate the length incorrectly (if, 976 // say, it's in mid-coarsening). So I'll leave in the defensive 977 // conditional below just in case. 978 size_t rs_length_diff = 0; 979 size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths(); 980 if (_max_rs_lengths > recorded_rs_lengths) { 981 rs_length_diff = _max_rs_lengths - recorded_rs_lengths; 982 } 983 _rs_length_diff_seq->add((double) rs_length_diff); 984 985 size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes; 986 size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes; 987 double cost_per_byte_ms = 0.0; 988 989 if (copied_bytes > 0) { 990 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes; 991 if (collector_state()->in_marking_window()) { 992 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); 993 } else { 994 _cost_per_byte_ms_seq->add(cost_per_byte_ms); 995 } 996 } 997 998 if (_collection_set->young_region_length() > 0) { 999 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() / 1000 _collection_set->young_region_length()); 1001 } 1002 1003 if (_collection_set->old_region_length() > 0) { 1004 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() / 1005 _collection_set->old_region_length()); 1006 } 1007 1008 _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms)); 1009 1010 _pending_cards_seq->add((double) _pending_cards); 1011 _rs_lengths_seq->add((double) _max_rs_lengths); 1012 } 1013 1014 collector_state()->set_in_marking_window(new_in_marking_window); 1015 collector_state()->set_in_marking_window_im(new_in_marking_window_im); 1016 _free_regions_at_end_of_collection = _g1->num_free_regions(); 1017 // IHOP control wants to know the expected young gen length if it were not 1018 // restrained by the heap reserve. Using the actual length would make the 1019 // prediction too small and the limit the young gen every time we get to the 1020 // predicted target occupancy. 1021 size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); 1022 update_rs_lengths_prediction(); 1023 1024 update_ihop_prediction(app_time_ms / 1000.0, 1025 _bytes_allocated_in_old_since_last_gc, 1026 last_unrestrained_young_length * HeapRegion::GrainBytes); 1027 _bytes_allocated_in_old_since_last_gc = 0; 1028 1029 _ihop_control->send_trace_event(_g1->gc_tracer_stw()); 1030 1031 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 1032 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 1033 1034 if (update_rs_time_goal_ms < scan_hcc_time_ms) { 1035 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)." 1036 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms", 1037 update_rs_time_goal_ms, scan_hcc_time_ms); 1038 1039 update_rs_time_goal_ms = 0; 1040 } else { 1041 update_rs_time_goal_ms -= scan_hcc_time_ms; 1042 } 1043 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, 1044 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), 1045 update_rs_time_goal_ms); 1046 1047 cset_chooser()->verify(); 1048 } 1049 1050 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { 1051 if (G1UseAdaptiveIHOP) { 1052 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, 1053 &_predictor, 1054 G1ReservePercent, 1055 G1HeapWastePercent); 1056 } else { 1057 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); 1058 } 1059 } 1060 1061 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s, 1062 size_t mutator_alloc_bytes, 1063 size_t young_gen_size) { 1064 // Always try to update IHOP prediction. Even evacuation failures give information 1065 // about e.g. whether to start IHOP earlier next time. 1066 1067 // Avoid using really small application times that might create samples with 1068 // very high or very low values. They may be caused by e.g. back-to-back gcs. 1069 double const min_valid_time = 1e-6; 1070 1071 bool report = false; 1072 1073 double marking_to_mixed_time = -1.0; 1074 if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) { 1075 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); 1076 assert(marking_to_mixed_time > 0.0, 1077 "Initial mark to mixed time must be larger than zero but is %.3f", 1078 marking_to_mixed_time); 1079 if (marking_to_mixed_time > min_valid_time) { 1080 _ihop_control->update_marking_length(marking_to_mixed_time); 1081 report = true; 1082 } 1083 } 1084 1085 // As an approximation for the young gc promotion rates during marking we use 1086 // all of them. In many applications there are only a few if any young gcs during 1087 // marking, which makes any prediction useless. This increases the accuracy of the 1088 // prediction. 1089 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { 1090 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); 1091 report = true; 1092 } 1093 1094 if (report) { 1095 report_ihop_statistics(); 1096 } 1097 } 1098 1099 void G1CollectorPolicy::report_ihop_statistics() { 1100 _ihop_control->print(); 1101 } 1102 1103 void G1CollectorPolicy::print_phases() { 1104 phase_times()->print(); 1105 } 1106 1107 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1108 double update_rs_processed_buffers, 1109 double goal_ms) { 1110 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1111 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1112 1113 if (G1UseAdaptiveConcRefinement) { 1114 const int k_gy = 3, k_gr = 6; 1115 const double inc_k = 1.1, dec_k = 0.9; 1116 1117 size_t g = cg1r->green_zone(); 1118 if (update_rs_time > goal_ms) { 1119 g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 1120 } else { 1121 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { 1122 g = (size_t)MAX2(g * inc_k, g + 1.0); 1123 } 1124 } 1125 // Change the refinement threads params 1126 cg1r->set_green_zone(g); 1127 cg1r->set_yellow_zone(g * k_gy); 1128 cg1r->set_red_zone(g * k_gr); 1129 cg1r->reinitialize_threads(); 1130 1131 size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1); 1132 size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, 1133 cg1r->yellow_zone()); 1134 // Change the barrier params 1135 dcqs.set_process_completed_threshold((int)processing_threshold); 1136 dcqs.set_max_completed_queue((int)cg1r->red_zone()); 1137 } 1138 1139 size_t curr_queue_size = dcqs.completed_buffers_num(); 1140 if (curr_queue_size >= cg1r->yellow_zone()) { 1141 dcqs.set_completed_queue_padding(curr_queue_size); 1142 } else { 1143 dcqs.set_completed_queue_padding(0); 1144 } 1145 dcqs.notify_if_necessary(); 1146 } 1147 1148 size_t G1CollectorPolicy::predict_rs_lengths() const { 1149 return get_new_size_prediction(_rs_lengths_seq); 1150 } 1151 1152 size_t G1CollectorPolicy::predict_rs_length_diff() const { 1153 return get_new_size_prediction(_rs_length_diff_seq); 1154 } 1155 1156 double G1CollectorPolicy::predict_alloc_rate_ms() const { 1157 return get_new_prediction(_alloc_rate_ms_seq); 1158 } 1159 1160 double G1CollectorPolicy::predict_cost_per_card_ms() const { 1161 return get_new_prediction(_cost_per_card_ms_seq); 1162 } 1163 1164 double G1CollectorPolicy::predict_scan_hcc_ms() const { 1165 return get_new_prediction(_cost_scan_hcc_seq); 1166 } 1167 1168 double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const { 1169 return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms(); 1170 } 1171 1172 double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const { 1173 return get_new_prediction(_young_cards_per_entry_ratio_seq); 1174 } 1175 1176 double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const { 1177 if (_mixed_cards_per_entry_ratio_seq->num() < 2) { 1178 return predict_young_cards_per_entry_ratio(); 1179 } else { 1180 return get_new_prediction(_mixed_cards_per_entry_ratio_seq); 1181 } 1182 } 1183 1184 size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const { 1185 return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); 1186 } 1187 1188 size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const { 1189 return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio()); 1190 } 1191 1192 double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const { 1193 if (collector_state()->gcs_are_young()) { 1194 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1195 } else { 1196 return predict_mixed_rs_scan_time_ms(card_num); 1197 } 1198 } 1199 1200 double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const { 1201 if (_mixed_cost_per_entry_ms_seq->num() < 3) { 1202 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1203 } else { 1204 return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq); 1205 } 1206 } 1207 1208 double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { 1209 if (_cost_per_byte_ms_during_cm_seq->num() < 3) { 1210 return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq); 1211 } else { 1212 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq); 1213 } 1214 } 1215 1216 double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const { 1217 if (collector_state()->during_concurrent_mark()) { 1218 return predict_object_copy_time_ms_during_cm(bytes_to_copy); 1219 } else { 1220 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); 1221 } 1222 } 1223 1224 double G1CollectorPolicy::predict_constant_other_time_ms() const { 1225 return get_new_prediction(_constant_other_time_ms_seq); 1226 } 1227 1228 double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const { 1229 return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); 1230 } 1231 1232 double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const { 1233 return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); 1234 } 1235 1236 double G1CollectorPolicy::predict_remark_time_ms() const { 1237 return get_new_prediction(_concurrent_mark_remark_times_ms); 1238 } 1239 1240 double G1CollectorPolicy::predict_cleanup_time_ms() const { 1241 return get_new_prediction(_concurrent_mark_cleanup_times_ms); 1242 } 1243 1244 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { 1245 TruncatedSeq* seq = surv_rate_group->get_seq(age); 1246 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age); 1247 double pred = get_new_prediction(seq); 1248 if (pred > 1.0) { 1249 pred = 1.0; 1250 } 1251 return pred; 1252 } 1253 1254 double G1CollectorPolicy::predict_yg_surv_rate(int age) const { 1255 return predict_yg_surv_rate(age, _short_lived_surv_rate_group); 1256 } 1257 1258 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const { 1259 return _short_lived_surv_rate_group->accum_surv_rate_pred(age); 1260 } 1261 1262 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1263 size_t scanned_cards) const { 1264 return 1265 predict_rs_update_time_ms(pending_cards) + 1266 predict_rs_scan_time_ms(scanned_cards) + 1267 predict_constant_other_time_ms(); 1268 } 1269 1270 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const { 1271 size_t rs_length = predict_rs_lengths() + predict_rs_length_diff(); 1272 size_t card_num; 1273 if (collector_state()->gcs_are_young()) { 1274 card_num = predict_young_card_num(rs_length); 1275 } else { 1276 card_num = predict_non_young_card_num(rs_length); 1277 } 1278 return predict_base_elapsed_time_ms(pending_cards, card_num); 1279 } 1280 1281 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const { 1282 size_t bytes_to_copy; 1283 if (hr->is_marked()) 1284 bytes_to_copy = hr->max_live_bytes(); 1285 else { 1286 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); 1287 int age = hr->age_in_surv_rate_group(); 1288 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1289 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); 1290 } 1291 return bytes_to_copy; 1292 } 1293 1294 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1295 bool for_young_gc) const { 1296 size_t rs_length = hr->rem_set()->occupied(); 1297 size_t card_num; 1298 1299 // Predicting the number of cards is based on which type of GC 1300 // we're predicting for. 1301 if (for_young_gc) { 1302 card_num = predict_young_card_num(rs_length); 1303 } else { 1304 card_num = predict_non_young_card_num(rs_length); 1305 } 1306 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1307 1308 double region_elapsed_time_ms = 1309 predict_rs_scan_time_ms(card_num) + 1310 predict_object_copy_time_ms(bytes_to_copy); 1311 1312 // The prediction of the "other" time for this region is based 1313 // upon the region type and NOT the GC type. 1314 if (hr->is_young()) { 1315 region_elapsed_time_ms += predict_young_other_time_ms(1); 1316 } else { 1317 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1318 } 1319 return region_elapsed_time_ms; 1320 } 1321 1322 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1323 double elapsed_ms) { 1324 _recent_gc_times_ms->add(elapsed_ms); 1325 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1326 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1327 } 1328 1329 void G1CollectorPolicy::clear_ratio_check_data() { 1330 _ratio_over_threshold_count = 0; 1331 _ratio_over_threshold_sum = 0.0; 1332 _pauses_since_start = 0; 1333 } 1334 1335 size_t G1CollectorPolicy::expansion_amount() { 1336 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; 1337 double last_gc_overhead = _last_pause_time_ratio * 100.0; 1338 double threshold = _gc_overhead_perc; 1339 size_t expand_bytes = 0; 1340 1341 // If the heap is at less than half its maximum size, scale the threshold down, 1342 // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand, 1343 // though the scaling code will likely keep the increase small. 1344 if (_g1->capacity() <= _g1->max_capacity() / 2) { 1345 threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); 1346 threshold = MAX2(threshold, 1.0); 1347 } 1348 1349 // If the last GC time ratio is over the threshold, increment the count of 1350 // times it has been exceeded, and add this ratio to the sum of exceeded 1351 // ratios. 1352 if (last_gc_overhead > threshold) { 1353 _ratio_over_threshold_count++; 1354 _ratio_over_threshold_sum += last_gc_overhead; 1355 } 1356 1357 // Check if we've had enough GC time ratio checks that were over the 1358 // threshold to trigger an expansion. We'll also expand if we've 1359 // reached the end of the history buffer and the average of all entries 1360 // is still over the threshold. This indicates a smaller number of GCs were 1361 // long enough to make the average exceed the threshold. 1362 bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics; 1363 if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || 1364 (filled_history_buffer && (recent_gc_overhead > threshold))) { 1365 size_t min_expand_bytes = HeapRegion::GrainBytes; 1366 size_t reserved_bytes = _g1->max_capacity(); 1367 size_t committed_bytes = _g1->capacity(); 1368 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1369 size_t expand_bytes_via_pct = 1370 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1371 double scale_factor = 1.0; 1372 1373 // If the current size is less than 1/4 of the Initial heap size, expand 1374 // by half of the delta between the current and Initial sizes. IE, grow 1375 // back quickly. 1376 // 1377 // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of 1378 // the available expansion space, whichever is smaller, as the base 1379 // expansion size. Then possibly scale this size according to how much the 1380 // threshold has (on average) been exceeded by. If the delta is small 1381 // (less than the StartScaleDownAt value), scale the size down linearly, but 1382 // not by less than MinScaleDownFactor. If the delta is large (greater than 1383 // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor 1384 // times the base size. The scaling will be linear in the range from 1385 // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, 1386 // ScaleUpRange sets the rate of scaling up. 1387 if (committed_bytes < InitialHeapSize / 4) { 1388 expand_bytes = (InitialHeapSize - committed_bytes) / 2; 1389 } else { 1390 double const MinScaleDownFactor = 0.2; 1391 double const MaxScaleUpFactor = 2; 1392 double const StartScaleDownAt = _gc_overhead_perc; 1393 double const StartScaleUpAt = _gc_overhead_perc * 1.5; 1394 double const ScaleUpRange = _gc_overhead_perc * 2.0; 1395 1396 double ratio_delta; 1397 if (filled_history_buffer) { 1398 ratio_delta = recent_gc_overhead - threshold; 1399 } else { 1400 ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; 1401 } 1402 1403 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1404 if (ratio_delta < StartScaleDownAt) { 1405 scale_factor = ratio_delta / StartScaleDownAt; 1406 scale_factor = MAX2(scale_factor, MinScaleDownFactor); 1407 } else if (ratio_delta > StartScaleUpAt) { 1408 scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); 1409 scale_factor = MIN2(scale_factor, MaxScaleUpFactor); 1410 } 1411 } 1412 1413 log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " 1414 "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", 1415 recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); 1416 1417 expand_bytes = static_cast<size_t>(expand_bytes * scale_factor); 1418 1419 // Ensure the expansion size is at least the minimum growth amount 1420 // and at most the remaining uncommitted byte size. 1421 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1422 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1423 1424 clear_ratio_check_data(); 1425 } else { 1426 // An expansion was not triggered. If we've started counting, increment 1427 // the number of checks we've made in the current window. If we've 1428 // reached the end of the window without resizing, clear the counters to 1429 // start again the next time we see a ratio above the threshold. 1430 if (_ratio_over_threshold_count > 0) { 1431 _pauses_since_start++; 1432 if (_pauses_since_start > NumPrevPausesForHeuristics) { 1433 clear_ratio_check_data(); 1434 } 1435 } 1436 } 1437 1438 return expand_bytes; 1439 } 1440 1441 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1442 #ifndef PRODUCT 1443 _short_lived_surv_rate_group->print_surv_rate_summary(); 1444 // add this call for any other surv rate groups 1445 #endif // PRODUCT 1446 } 1447 1448 bool G1CollectorPolicy::is_young_list_full() const { 1449 uint young_list_length = _g1->young_list()->length(); 1450 uint young_list_target_length = _young_list_target_length; 1451 return young_list_length >= young_list_target_length; 1452 } 1453 1454 bool G1CollectorPolicy::can_expand_young_list() const { 1455 uint young_list_length = _g1->young_list()->length(); 1456 uint young_list_max_length = _young_list_max_length; 1457 return young_list_length < young_list_max_length; 1458 } 1459 1460 bool G1CollectorPolicy::adaptive_young_list_length() const { 1461 return _young_gen_sizer->adaptive_young_list_length(); 1462 } 1463 1464 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1465 uint expansion_region_num = 0; 1466 if (GCLockerEdenExpansionPercent > 0) { 1467 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1468 double expansion_region_num_d = perc * (double) _young_list_target_length; 1469 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1470 // less than 1.0) we'll get 1. 1471 expansion_region_num = (uint) ceil(expansion_region_num_d); 1472 } else { 1473 assert(expansion_region_num == 0, "sanity"); 1474 } 1475 _young_list_max_length = _young_list_target_length + expansion_region_num; 1476 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1477 } 1478 1479 // Calculates survivor space parameters. 1480 void G1CollectorPolicy::update_survivors_policy() { 1481 double max_survivor_regions_d = 1482 (double) _young_list_target_length / (double) SurvivorRatio; 1483 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1484 // smaller than 1.0) we'll get 1. 1485 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1486 1487 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1488 HeapRegion::GrainWords * _max_survivor_regions, counters()); 1489 } 1490 1491 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { 1492 // We actually check whether we are marking here and not if we are in a 1493 // reclamation phase. This means that we will schedule a concurrent mark 1494 // even while we are still in the process of reclaiming memory. 1495 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1496 if (!during_cycle) { 1497 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause)); 1498 collector_state()->set_initiate_conc_mark_if_possible(true); 1499 return true; 1500 } else { 1501 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause)); 1502 return false; 1503 } 1504 } 1505 1506 void G1CollectorPolicy::initiate_conc_mark() { 1507 collector_state()->set_during_initial_mark_pause(true); 1508 collector_state()->set_initiate_conc_mark_if_possible(false); 1509 } 1510 1511 void G1CollectorPolicy::decide_on_conc_mark_initiation() { 1512 // We are about to decide on whether this pause will be an 1513 // initial-mark pause. 1514 1515 // First, collector_state()->during_initial_mark_pause() should not be already set. We 1516 // will set it here if we have to. However, it should be cleared by 1517 // the end of the pause (it's only set for the duration of an 1518 // initial-mark pause). 1519 assert(!collector_state()->during_initial_mark_pause(), "pre-condition"); 1520 1521 if (collector_state()->initiate_conc_mark_if_possible()) { 1522 // We had noticed on a previous pause that the heap occupancy has 1523 // gone over the initiating threshold and we should start a 1524 // concurrent marking cycle. So we might initiate one. 1525 1526 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) { 1527 // Initiate a new initial mark if there is no marking or reclamation going on. 1528 initiate_conc_mark(); 1529 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); 1530 } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) { 1531 // Initiate a user requested initial mark. An initial mark must be young only 1532 // GC, so the collector state must be updated to reflect this. 1533 collector_state()->set_gcs_are_young(true); 1534 collector_state()->set_last_young_gc(false); 1535 1536 abort_time_to_mixed_tracking(); 1537 initiate_conc_mark(); 1538 log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)"); 1539 } else { 1540 // The concurrent marking thread is still finishing up the 1541 // previous cycle. If we start one right now the two cycles 1542 // overlap. In particular, the concurrent marking thread might 1543 // be in the process of clearing the next marking bitmap (which 1544 // we will use for the next cycle if we start one). Starting a 1545 // cycle now will be bad given that parts of the marking 1546 // information might get cleared by the marking thread. And we 1547 // cannot wait for the marking thread to finish the cycle as it 1548 // periodically yields while clearing the next marking bitmap 1549 // and, if it's in a yield point, it's waiting for us to 1550 // finish. So, at this point we will not start a cycle and we'll 1551 // let the concurrent marking thread complete the last one. 1552 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)"); 1553 } 1554 } 1555 } 1556 1557 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1558 G1CollectedHeap* _g1h; 1559 CSetChooserParUpdater _cset_updater; 1560 1561 public: 1562 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1563 uint chunk_size) : 1564 _g1h(G1CollectedHeap::heap()), 1565 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1566 1567 bool doHeapRegion(HeapRegion* r) { 1568 // Do we have any marking information for this region? 1569 if (r->is_marked()) { 1570 // We will skip any region that's currently used as an old GC 1571 // alloc region (we should not consider those for collection 1572 // before we fill them up). 1573 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1574 _cset_updater.add_region(r); 1575 } 1576 } 1577 return false; 1578 } 1579 }; 1580 1581 class ParKnownGarbageTask: public AbstractGangTask { 1582 CollectionSetChooser* _hrSorted; 1583 uint _chunk_size; 1584 G1CollectedHeap* _g1; 1585 HeapRegionClaimer _hrclaimer; 1586 1587 public: 1588 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : 1589 AbstractGangTask("ParKnownGarbageTask"), 1590 _hrSorted(hrSorted), _chunk_size(chunk_size), 1591 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} 1592 1593 void work(uint worker_id) { 1594 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1595 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); 1596 } 1597 }; 1598 1599 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { 1600 assert(n_workers > 0, "Active gc workers should be greater than 0"); 1601 const uint overpartition_factor = 4; 1602 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); 1603 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); 1604 } 1605 1606 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { 1607 cset_chooser()->clear(); 1608 1609 WorkGang* workers = _g1->workers(); 1610 uint n_workers = workers->active_workers(); 1611 1612 uint n_regions = _g1->num_regions(); 1613 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); 1614 cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); 1615 ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers); 1616 workers->run_task(&par_known_garbage_task); 1617 1618 cset_chooser()->sort_regions(); 1619 1620 double end_sec = os::elapsedTime(); 1621 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1622 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1623 _prev_collection_pause_end_ms += elapsed_time_ms; 1624 1625 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); 1626 } 1627 1628 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const { 1629 // Returns the given amount of reclaimable bytes (that represents 1630 // the amount of reclaimable space still to be collected) as a 1631 // percentage of the current heap capacity. 1632 size_t capacity_bytes = _g1->capacity(); 1633 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 1634 } 1635 1636 void G1CollectorPolicy::maybe_start_marking() { 1637 if (need_to_start_conc_mark("end of GC")) { 1638 // Note: this might have already been set, if during the last 1639 // pause we decided to start a cycle but at the beginning of 1640 // this pause we decided to postpone it. That's OK. 1641 collector_state()->set_initiate_conc_mark_if_possible(true); 1642 } 1643 } 1644 1645 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const { 1646 assert(!collector_state()->full_collection(), "must be"); 1647 if (collector_state()->during_initial_mark_pause()) { 1648 assert(collector_state()->last_gc_was_young(), "must be"); 1649 assert(!collector_state()->last_young_gc(), "must be"); 1650 return InitialMarkGC; 1651 } else if (collector_state()->last_young_gc()) { 1652 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1653 assert(collector_state()->last_gc_was_young(), "must be"); 1654 return LastYoungGC; 1655 } else if (!collector_state()->last_gc_was_young()) { 1656 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1657 assert(!collector_state()->last_young_gc(), "must be"); 1658 return MixedGC; 1659 } else { 1660 assert(collector_state()->last_gc_was_young(), "must be"); 1661 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1662 assert(!collector_state()->last_young_gc(), "must be"); 1663 return YoungOnlyGC; 1664 } 1665 } 1666 1667 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) { 1668 // Manage the MMU tracker. For some reason it ignores Full GCs. 1669 if (kind != FullGC) { 1670 _mmu_tracker->add_pause(start, end); 1671 } 1672 // Manage the mutator time tracking from initial mark to first mixed gc. 1673 switch (kind) { 1674 case FullGC: 1675 abort_time_to_mixed_tracking(); 1676 break; 1677 case Cleanup: 1678 case Remark: 1679 case YoungOnlyGC: 1680 case LastYoungGC: 1681 _initial_mark_to_mixed.add_pause(end - start); 1682 break; 1683 case InitialMarkGC: 1684 _initial_mark_to_mixed.record_initial_mark_end(end); 1685 break; 1686 case MixedGC: 1687 _initial_mark_to_mixed.record_mixed_gc_start(start); 1688 break; 1689 default: 1690 ShouldNotReachHere(); 1691 } 1692 } 1693 1694 void G1CollectorPolicy::abort_time_to_mixed_tracking() { 1695 _initial_mark_to_mixed.reset(); 1696 } 1697 1698 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 1699 const char* false_action_str) const { 1700 if (cset_chooser()->is_empty()) { 1701 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); 1702 return false; 1703 } 1704 1705 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 1706 size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); 1707 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 1708 double threshold = (double) G1HeapWastePercent; 1709 if (reclaimable_perc <= threshold) { 1710 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1711 false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 1712 return false; 1713 } 1714 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1715 true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 1716 return true; 1717 } 1718 1719 uint G1CollectorPolicy::calc_min_old_cset_length() const { 1720 // The min old CSet region bound is based on the maximum desired 1721 // number of mixed GCs after a cycle. I.e., even if some old regions 1722 // look expensive, we should add them to the CSet anyway to make 1723 // sure we go through the available old regions in no more than the 1724 // maximum desired number of mixed GCs. 1725 // 1726 // The calculation is based on the number of marked regions we added 1727 // to the CSet chooser in the first place, not how many remain, so 1728 // that the result is the same during all mixed GCs that follow a cycle. 1729 1730 const size_t region_num = (size_t) cset_chooser()->length(); 1731 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 1732 size_t result = region_num / gc_num; 1733 // emulate ceiling 1734 if (result * gc_num < region_num) { 1735 result += 1; 1736 } 1737 return (uint) result; 1738 } 1739 1740 uint G1CollectorPolicy::calc_max_old_cset_length() const { 1741 // The max old CSet region bound is based on the threshold expressed 1742 // as a percentage of the heap size. I.e., it should bound the 1743 // number of old regions added to the CSet irrespective of how many 1744 // of them are available. 1745 1746 const G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1747 const size_t region_num = g1h->num_regions(); 1748 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 1749 size_t result = region_num * perc / 100; 1750 // emulate ceiling 1751 if (100 * result < region_num * perc) { 1752 result += 1; 1753 } 1754 return (uint) result; 1755 } 1756 1757 void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) { 1758 double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms); 1759 _collection_set->finalize_old_part(time_remaining_ms); 1760 } 1761