1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/concurrentG1Refine.hpp" 27 #include "gc/g1/concurrentMarkThread.inline.hpp" 28 #include "gc/g1/g1CollectedHeap.inline.hpp" 29 #include "gc/g1/g1CollectionSet.hpp" 30 #include "gc/g1/g1CollectorPolicy.hpp" 31 #include "gc/g1/g1ConcurrentMark.hpp" 32 #include "gc/g1/g1IHOPControl.hpp" 33 #include "gc/g1/g1GCPhaseTimes.hpp" 34 #include "gc/g1/g1YoungGenSizer.hpp" 35 #include "gc/g1/heapRegion.inline.hpp" 36 #include "gc/g1/heapRegionRemSet.hpp" 37 #include "gc/shared/gcPolicyCounters.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/mutexLocker.hpp" 41 #include "utilities/debug.hpp" 42 #include "utilities/pair.hpp" 43 44 // Different defaults for different number of GC threads 45 // They were chosen by running GCOld and SPECjbb on debris with different 46 // numbers of GC threads and choosing them based on the results 47 48 // all the same 49 static double rs_length_diff_defaults[] = { 50 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 51 }; 52 53 static double cost_per_card_ms_defaults[] = { 54 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 55 }; 56 57 // all the same 58 static double young_cards_per_entry_ratio_defaults[] = { 59 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 60 }; 61 62 static double cost_per_entry_ms_defaults[] = { 63 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 64 }; 65 66 static double cost_per_byte_ms_defaults[] = { 67 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 68 }; 69 70 // these should be pretty consistent 71 static double constant_other_time_ms_defaults[] = { 72 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 73 }; 74 75 76 static double young_other_cost_per_region_ms_defaults[] = { 77 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 78 }; 79 80 static double non_young_other_cost_per_region_ms_defaults[] = { 81 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 82 }; 83 84 G1CollectorPolicy::G1CollectorPolicy() : 85 _predictor(G1ConfidencePercent / 100.0), 86 87 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 88 89 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 90 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 91 92 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 93 _prev_collection_pause_end_ms(0.0), 94 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 95 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 96 _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)), 97 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 98 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 99 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 100 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 101 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), 103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 105 _non_young_other_cost_per_region_ms_seq( 106 new TruncatedSeq(TruncatedSeqLength)), 107 108 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 109 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), 110 111 _pause_time_target_ms((double) MaxGCPauseMillis), 112 113 _recent_prev_end_times_for_all_gcs_sec( 114 new TruncatedSeq(NumPrevPausesForHeuristics)), 115 116 _recent_avg_pause_time_ratio(0.0), 117 _rs_lengths_prediction(0), 118 _max_survivor_regions(0), 119 120 // add here any more surv rate groups 121 _recorded_survivor_regions(0), 122 _recorded_survivor_head(NULL), 123 _recorded_survivor_tail(NULL), 124 _survivors_age_table(true), 125 126 _gc_overhead_perc(0.0), 127 128 _bytes_allocated_in_old_since_last_gc(0), 129 _ihop_control(NULL), 130 _initial_mark_to_mixed() { 131 132 // SurvRateGroups below must be initialized after the predictor because they 133 // indirectly use it through this object passed to their constructor. 134 _short_lived_surv_rate_group = 135 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary); 136 _survivor_surv_rate_group = 137 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary); 138 139 // Set up the region size and associated fields. Given that the 140 // policy is created before the heap, we have to set this up here, 141 // so it's done as soon as possible. 142 143 // It would have been natural to pass initial_heap_byte_size() and 144 // max_heap_byte_size() to setup_heap_region_size() but those have 145 // not been set up at this point since they should be aligned with 146 // the region size. So, there is a circular dependency here. We base 147 // the region size on the heap size, but the heap size should be 148 // aligned with the region size. To get around this we use the 149 // unaligned values for the heap. 150 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); 151 HeapRegionRemSet::setup_remset_size(); 152 153 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 154 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 155 clear_ratio_check_data(); 156 157 _phase_times = new G1GCPhaseTimes(ParallelGCThreads); 158 159 int index = MIN2(ParallelGCThreads - 1, 7u); 160 161 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 162 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 163 _cost_scan_hcc_seq->add(0.0); 164 _young_cards_per_entry_ratio_seq->add( 165 young_cards_per_entry_ratio_defaults[index]); 166 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 167 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 168 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 169 _young_other_cost_per_region_ms_seq->add( 170 young_other_cost_per_region_ms_defaults[index]); 171 _non_young_other_cost_per_region_ms_seq->add( 172 non_young_other_cost_per_region_ms_defaults[index]); 173 174 // Below, we might need to calculate the pause time target based on 175 // the pause interval. When we do so we are going to give G1 maximum 176 // flexibility and allow it to do pauses when it needs to. So, we'll 177 // arrange that the pause interval to be pause time target + 1 to 178 // ensure that a) the pause time target is maximized with respect to 179 // the pause interval and b) we maintain the invariant that pause 180 // time target < pause interval. If the user does not want this 181 // maximum flexibility, they will have to set the pause interval 182 // explicitly. 183 184 // First make sure that, if either parameter is set, its value is 185 // reasonable. 186 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 187 if (MaxGCPauseMillis < 1) { 188 vm_exit_during_initialization("MaxGCPauseMillis should be " 189 "greater than 0"); 190 } 191 } 192 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 193 if (GCPauseIntervalMillis < 1) { 194 vm_exit_during_initialization("GCPauseIntervalMillis should be " 195 "greater than 0"); 196 } 197 } 198 199 // Then, if the pause time target parameter was not set, set it to 200 // the default value. 201 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 202 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 203 // The default pause time target in G1 is 200ms 204 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 205 } else { 206 // We do not allow the pause interval to be set without the 207 // pause time target 208 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " 209 "without setting MaxGCPauseMillis"); 210 } 211 } 212 213 // Then, if the interval parameter was not set, set it according to 214 // the pause time target (this will also deal with the case when the 215 // pause time target is the default value). 216 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 217 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); 218 } 219 220 // Finally, make sure that the two parameters are consistent. 221 if (MaxGCPauseMillis >= GCPauseIntervalMillis) { 222 char buffer[256]; 223 jio_snprintf(buffer, 256, 224 "MaxGCPauseMillis (%u) should be less than " 225 "GCPauseIntervalMillis (%u)", 226 MaxGCPauseMillis, GCPauseIntervalMillis); 227 vm_exit_during_initialization(buffer); 228 } 229 230 double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 231 double time_slice = (double) GCPauseIntervalMillis / 1000.0; 232 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); 233 234 // start conservatively (around 50ms is about right) 235 _concurrent_mark_remark_times_ms->add(0.05); 236 _concurrent_mark_cleanup_times_ms->add(0.20); 237 _tenuring_threshold = MaxTenuringThreshold; 238 239 assert(GCTimeRatio > 0, 240 "we should have set it to a default value set_g1_gc_flags() " 241 "if a user set it to 0"); 242 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); 243 244 uintx reserve_perc = G1ReservePercent; 245 // Put an artificial ceiling on this so that it's not set to a silly value. 246 if (reserve_perc > 50) { 247 reserve_perc = 50; 248 warning("G1ReservePercent is set to a value that is too large, " 249 "it's been updated to " UINTX_FORMAT, reserve_perc); 250 } 251 _reserve_factor = (double) reserve_perc / 100.0; 252 // This will be set when the heap is expanded 253 // for the first time during initialization. 254 _reserve_regions = 0; 255 256 _ihop_control = create_ihop_control(); 257 } 258 259 G1CollectorPolicy::~G1CollectorPolicy() { 260 delete _ihop_control; 261 } 262 263 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const { 264 return _predictor.get_new_prediction(seq); 265 } 266 267 size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const { 268 return (size_t)get_new_prediction(seq); 269 } 270 271 void G1CollectorPolicy::initialize_alignments() { 272 _space_alignment = HeapRegion::GrainBytes; 273 size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint(); 274 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); 275 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); 276 } 277 278 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); } 279 280 void G1CollectorPolicy::post_heap_initialize() { 281 uintx max_regions = G1CollectedHeap::heap()->max_regions(); 282 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; 283 if (max_young_size != MaxNewSize) { 284 FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size); 285 } 286 } 287 288 void G1CollectorPolicy::initialize_flags() { 289 if (G1HeapRegionSize != HeapRegion::GrainBytes) { 290 FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes); 291 } 292 293 if (SurvivorRatio < 1) { 294 vm_exit_during_initialization("Invalid survivor ratio specified"); 295 } 296 CollectorPolicy::initialize_flags(); 297 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags 298 } 299 300 301 void G1CollectorPolicy::init() { 302 // Set aside an initial future to_space. 303 _g1 = G1CollectedHeap::heap(); 304 _collection_set = _g1->collection_set(); 305 _collection_set->set_policy(this); 306 307 assert(Heap_lock->owned_by_self(), "Locking discipline."); 308 309 initialize_gc_policy_counters(); 310 311 if (adaptive_young_list_length()) { 312 _young_list_fixed_length = 0; 313 } else { 314 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); 315 } 316 _free_regions_at_end_of_collection = _g1->num_free_regions(); 317 318 update_young_list_max_and_target_length(); 319 // We may immediately start allocating regions and placing them on the 320 // collection set list. Initialize the per-collection set info 321 _collection_set->start_incremental_building(); 322 } 323 324 void G1CollectorPolicy::note_gc_start(uint num_active_workers) { 325 phase_times()->note_gc_start(num_active_workers); 326 } 327 328 // Create the jstat counters for the policy. 329 void G1CollectorPolicy::initialize_gc_policy_counters() { 330 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); 331 } 332 333 bool G1CollectorPolicy::predict_will_fit(uint young_length, 334 double base_time_ms, 335 uint base_free_regions, 336 double target_pause_time_ms) const { 337 if (young_length >= base_free_regions) { 338 // end condition 1: not enough space for the young regions 339 return false; 340 } 341 342 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); 343 size_t bytes_to_copy = 344 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); 345 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); 346 double young_other_time_ms = predict_young_other_time_ms(young_length); 347 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; 348 if (pause_time_ms > target_pause_time_ms) { 349 // end condition 2: prediction is over the target pause time 350 return false; 351 } 352 353 size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes; 354 355 // When copying, we will likely need more bytes free than is live in the region. 356 // Add some safety margin to factor in the confidence of our guess, and the 357 // natural expected waste. 358 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty 359 // of the calculation: the lower the confidence, the more headroom. 360 // (100 + TargetPLABWastePct) represents the increase in expected bytes during 361 // copying due to anticipated waste in the PLABs. 362 double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0; 363 size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy); 364 365 if (expected_bytes_to_copy > free_bytes) { 366 // end condition 3: out-of-space 367 return false; 368 } 369 370 // success! 371 return true; 372 } 373 374 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { 375 // re-calculate the necessary reserve 376 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; 377 // We use ceiling so that if reserve_regions_d is > 0.0 (but 378 // smaller than 1.0) we'll get 1. 379 _reserve_regions = (uint) ceil(reserve_regions_d); 380 381 _young_gen_sizer->heap_size_changed(new_number_of_regions); 382 383 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes); 384 } 385 386 uint G1CollectorPolicy::calculate_young_list_desired_min_length( 387 uint base_min_length) const { 388 uint desired_min_length = 0; 389 if (adaptive_young_list_length()) { 390 if (_alloc_rate_ms_seq->num() > 3) { 391 double now_sec = os::elapsedTime(); 392 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 393 double alloc_rate_ms = predict_alloc_rate_ms(); 394 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); 395 } else { 396 // otherwise we don't have enough info to make the prediction 397 } 398 } 399 desired_min_length += base_min_length; 400 // make sure we don't go below any user-defined minimum bound 401 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); 402 } 403 404 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const { 405 // Here, we might want to also take into account any additional 406 // constraints (i.e., user-defined minimum bound). Currently, we 407 // effectively don't set this bound. 408 return _young_gen_sizer->max_desired_young_length(); 409 } 410 411 uint G1CollectorPolicy::update_young_list_max_and_target_length() { 412 return update_young_list_max_and_target_length(get_new_size_prediction(_rs_lengths_seq)); 413 } 414 415 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) { 416 uint unbounded_target_length = update_young_list_target_length(rs_lengths); 417 update_max_gc_locker_expansion(); 418 return unbounded_target_length; 419 } 420 421 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { 422 YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths); 423 _young_list_target_length = young_lengths.first; 424 return young_lengths.second; 425 } 426 427 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const { 428 YoungTargetLengths result; 429 430 // Calculate the absolute and desired min bounds first. 431 432 // This is how many young regions we already have (currently: the survivors). 433 uint base_min_length = recorded_survivor_regions(); 434 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); 435 // This is the absolute minimum young length. Ensure that we 436 // will at least have one eden region available for allocation. 437 uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1); 438 // If we shrank the young list target it should not shrink below the current size. 439 desired_min_length = MAX2(desired_min_length, absolute_min_length); 440 // Calculate the absolute and desired max bounds. 441 442 uint desired_max_length = calculate_young_list_desired_max_length(); 443 444 uint young_list_target_length = 0; 445 if (adaptive_young_list_length()) { 446 if (collector_state()->gcs_are_young()) { 447 young_list_target_length = 448 calculate_young_list_target_length(rs_lengths, 449 base_min_length, 450 desired_min_length, 451 desired_max_length); 452 } else { 453 // Don't calculate anything and let the code below bound it to 454 // the desired_min_length, i.e., do the next GC as soon as 455 // possible to maximize how many old regions we can add to it. 456 } 457 } else { 458 // The user asked for a fixed young gen so we'll fix the young gen 459 // whether the next GC is young or mixed. 460 young_list_target_length = _young_list_fixed_length; 461 } 462 463 result.second = young_list_target_length; 464 465 // We will try our best not to "eat" into the reserve. 466 uint absolute_max_length = 0; 467 if (_free_regions_at_end_of_collection > _reserve_regions) { 468 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; 469 } 470 if (desired_max_length > absolute_max_length) { 471 desired_max_length = absolute_max_length; 472 } 473 474 // Make sure we don't go over the desired max length, nor under the 475 // desired min length. In case they clash, desired_min_length wins 476 // which is why that test is second. 477 if (young_list_target_length > desired_max_length) { 478 young_list_target_length = desired_max_length; 479 } 480 if (young_list_target_length < desired_min_length) { 481 young_list_target_length = desired_min_length; 482 } 483 484 assert(young_list_target_length > recorded_survivor_regions(), 485 "we should be able to allocate at least one eden region"); 486 assert(young_list_target_length >= absolute_min_length, "post-condition"); 487 488 result.first = young_list_target_length; 489 return result; 490 } 491 492 uint 493 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, 494 uint base_min_length, 495 uint desired_min_length, 496 uint desired_max_length) const { 497 assert(adaptive_young_list_length(), "pre-condition"); 498 assert(collector_state()->gcs_are_young(), "only call this for young GCs"); 499 500 // In case some edge-condition makes the desired max length too small... 501 if (desired_max_length <= desired_min_length) { 502 return desired_min_length; 503 } 504 505 // We'll adjust min_young_length and max_young_length not to include 506 // the already allocated young regions (i.e., so they reflect the 507 // min and max eden regions we'll allocate). The base_min_length 508 // will be reflected in the predictions by the 509 // survivor_regions_evac_time prediction. 510 assert(desired_min_length > base_min_length, "invariant"); 511 uint min_young_length = desired_min_length - base_min_length; 512 assert(desired_max_length > base_min_length, "invariant"); 513 uint max_young_length = desired_max_length - base_min_length; 514 515 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 516 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); 517 size_t pending_cards = get_new_size_prediction(_pending_cards_seq); 518 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); 519 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); 520 double base_time_ms = 521 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + 522 survivor_regions_evac_time; 523 uint available_free_regions = _free_regions_at_end_of_collection; 524 uint base_free_regions = 0; 525 if (available_free_regions > _reserve_regions) { 526 base_free_regions = available_free_regions - _reserve_regions; 527 } 528 529 // Here, we will make sure that the shortest young length that 530 // makes sense fits within the target pause time. 531 532 if (predict_will_fit(min_young_length, base_time_ms, 533 base_free_regions, target_pause_time_ms)) { 534 // The shortest young length will fit into the target pause time; 535 // we'll now check whether the absolute maximum number of young 536 // regions will fit in the target pause time. If not, we'll do 537 // a binary search between min_young_length and max_young_length. 538 if (predict_will_fit(max_young_length, base_time_ms, 539 base_free_regions, target_pause_time_ms)) { 540 // The maximum young length will fit into the target pause time. 541 // We are done so set min young length to the maximum length (as 542 // the result is assumed to be returned in min_young_length). 543 min_young_length = max_young_length; 544 } else { 545 // The maximum possible number of young regions will not fit within 546 // the target pause time so we'll search for the optimal 547 // length. The loop invariants are: 548 // 549 // min_young_length < max_young_length 550 // min_young_length is known to fit into the target pause time 551 // max_young_length is known not to fit into the target pause time 552 // 553 // Going into the loop we know the above hold as we've just 554 // checked them. Every time around the loop we check whether 555 // the middle value between min_young_length and 556 // max_young_length fits into the target pause time. If it 557 // does, it becomes the new min. If it doesn't, it becomes 558 // the new max. This way we maintain the loop invariants. 559 560 assert(min_young_length < max_young_length, "invariant"); 561 uint diff = (max_young_length - min_young_length) / 2; 562 while (diff > 0) { 563 uint young_length = min_young_length + diff; 564 if (predict_will_fit(young_length, base_time_ms, 565 base_free_regions, target_pause_time_ms)) { 566 min_young_length = young_length; 567 } else { 568 max_young_length = young_length; 569 } 570 assert(min_young_length < max_young_length, "invariant"); 571 diff = (max_young_length - min_young_length) / 2; 572 } 573 // The results is min_young_length which, according to the 574 // loop invariants, should fit within the target pause time. 575 576 // These are the post-conditions of the binary search above: 577 assert(min_young_length < max_young_length, 578 "otherwise we should have discovered that max_young_length " 579 "fits into the pause target and not done the binary search"); 580 assert(predict_will_fit(min_young_length, base_time_ms, 581 base_free_regions, target_pause_time_ms), 582 "min_young_length, the result of the binary search, should " 583 "fit into the pause target"); 584 assert(!predict_will_fit(min_young_length + 1, base_time_ms, 585 base_free_regions, target_pause_time_ms), 586 "min_young_length, the result of the binary search, should be " 587 "optimal, so no larger length should fit into the pause target"); 588 } 589 } else { 590 // Even the minimum length doesn't fit into the pause time 591 // target, return it as the result nevertheless. 592 } 593 return base_min_length + min_young_length; 594 } 595 596 double G1CollectorPolicy::predict_survivor_regions_evac_time() const { 597 double survivor_regions_evac_time = 0.0; 598 for (HeapRegion * r = _recorded_survivor_head; 599 r != NULL && r != _recorded_survivor_tail->get_next_young_region(); 600 r = r->get_next_young_region()) { 601 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young()); 602 } 603 return survivor_regions_evac_time; 604 } 605 606 void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) { 607 guarantee( adaptive_young_list_length(), "should not call this otherwise" ); 608 609 if (rs_lengths > _rs_lengths_prediction) { 610 // add 10% to avoid having to recalculate often 611 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; 612 update_rs_lengths_prediction(rs_lengths_prediction); 613 614 update_young_list_max_and_target_length(rs_lengths_prediction); 615 } 616 } 617 618 void G1CollectorPolicy::update_rs_lengths_prediction() { 619 update_rs_lengths_prediction(get_new_size_prediction(_rs_lengths_seq)); 620 } 621 622 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) { 623 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) { 624 _rs_lengths_prediction = prediction; 625 } 626 } 627 628 #ifndef PRODUCT 629 bool G1CollectorPolicy::verify_young_ages() { 630 HeapRegion* head = _g1->young_list()->first_region(); 631 return 632 verify_young_ages(head, _short_lived_surv_rate_group); 633 // also call verify_young_ages on any additional surv rate groups 634 } 635 636 bool 637 G1CollectorPolicy::verify_young_ages(HeapRegion* head, 638 SurvRateGroup *surv_rate_group) { 639 guarantee( surv_rate_group != NULL, "pre-condition" ); 640 641 const char* name = surv_rate_group->name(); 642 bool ret = true; 643 int prev_age = -1; 644 645 for (HeapRegion* curr = head; 646 curr != NULL; 647 curr = curr->get_next_young_region()) { 648 SurvRateGroup* group = curr->surv_rate_group(); 649 if (group == NULL && !curr->is_survivor()) { 650 log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name); 651 ret = false; 652 } 653 654 if (surv_rate_group == group) { 655 int age = curr->age_in_surv_rate_group(); 656 657 if (age < 0) { 658 log_error(gc, verify)("## %s: encountered negative age", name); 659 ret = false; 660 } 661 662 if (age <= prev_age) { 663 log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age); 664 ret = false; 665 } 666 prev_age = age; 667 } 668 } 669 670 return ret; 671 } 672 #endif // PRODUCT 673 674 void G1CollectorPolicy::record_full_collection_start() { 675 _full_collection_start_sec = os::elapsedTime(); 676 // Release the future to-space so that it is available for compaction into. 677 collector_state()->set_full_collection(true); 678 } 679 680 void G1CollectorPolicy::record_full_collection_end() { 681 // Consider this like a collection pause for the purposes of allocation 682 // since last pause. 683 double end_sec = os::elapsedTime(); 684 double full_gc_time_sec = end_sec - _full_collection_start_sec; 685 double full_gc_time_ms = full_gc_time_sec * 1000.0; 686 687 update_recent_gc_times(end_sec, full_gc_time_ms); 688 689 collector_state()->set_full_collection(false); 690 691 // "Nuke" the heuristics that control the young/mixed GC 692 // transitions and make sure we start with young GCs after the Full GC. 693 collector_state()->set_gcs_are_young(true); 694 collector_state()->set_last_young_gc(false); 695 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); 696 collector_state()->set_during_initial_mark_pause(false); 697 collector_state()->set_in_marking_window(false); 698 collector_state()->set_in_marking_window_im(false); 699 700 _short_lived_surv_rate_group->start_adding_regions(); 701 // also call this on any additional surv rate groups 702 703 record_survivor_regions(0, NULL, NULL); 704 705 _free_regions_at_end_of_collection = _g1->num_free_regions(); 706 // Reset survivors SurvRateGroup. 707 _survivor_surv_rate_group->reset(); 708 update_young_list_max_and_target_length(); 709 update_rs_lengths_prediction(); 710 cset_chooser()->clear(); 711 712 _bytes_allocated_in_old_since_last_gc = 0; 713 714 record_pause(FullGC, _full_collection_start_sec, end_sec); 715 } 716 717 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { 718 // We only need to do this here as the policy will only be applied 719 // to the GC we're about to start. so, no point is calculating this 720 // every time we calculate / recalculate the target young length. 721 update_survivors_policy(); 722 723 assert(_g1->used() == _g1->recalculate_used(), 724 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT, 725 _g1->used(), _g1->recalculate_used()); 726 727 phase_times()->record_cur_collection_start_sec(start_time_sec); 728 _pending_cards = _g1->pending_card_num(); 729 730 _collection_set->reset_bytes_used_before(); 731 _bytes_copied_during_gc = 0; 732 733 collector_state()->set_last_gc_was_young(false); 734 735 // do that for any other surv rate groups 736 _short_lived_surv_rate_group->stop_adding_regions(); 737 _survivors_age_table.clear(); 738 739 assert( verify_young_ages(), "region age verification" ); 740 } 741 742 void G1CollectorPolicy::record_concurrent_mark_init_end(double 743 mark_init_elapsed_time_ms) { 744 collector_state()->set_during_marking(true); 745 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); 746 collector_state()->set_during_initial_mark_pause(false); 747 } 748 749 void G1CollectorPolicy::record_concurrent_mark_remark_start() { 750 _mark_remark_start_sec = os::elapsedTime(); 751 collector_state()->set_during_marking(false); 752 } 753 754 void G1CollectorPolicy::record_concurrent_mark_remark_end() { 755 double end_time_sec = os::elapsedTime(); 756 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 757 _concurrent_mark_remark_times_ms->add(elapsed_time_ms); 758 _prev_collection_pause_end_ms += elapsed_time_ms; 759 760 record_pause(Remark, _mark_remark_start_sec, end_time_sec); 761 } 762 763 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { 764 _mark_cleanup_start_sec = os::elapsedTime(); 765 } 766 767 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { 768 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc", 769 "skip last young-only gc"); 770 collector_state()->set_last_young_gc(should_continue_with_reclaim); 771 // We skip the marking phase. 772 if (!should_continue_with_reclaim) { 773 abort_time_to_mixed_tracking(); 774 } 775 collector_state()->set_in_marking_window(false); 776 } 777 778 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { 779 return phase_times()->average_time_ms(phase); 780 } 781 782 double G1CollectorPolicy::young_other_time_ms() const { 783 return phase_times()->young_cset_choice_time_ms() + 784 phase_times()->young_free_cset_time_ms(); 785 } 786 787 double G1CollectorPolicy::non_young_other_time_ms() const { 788 return phase_times()->non_young_cset_choice_time_ms() + 789 phase_times()->non_young_free_cset_time_ms(); 790 791 } 792 793 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const { 794 return pause_time_ms - 795 average_time_ms(G1GCPhaseTimes::UpdateRS) - 796 average_time_ms(G1GCPhaseTimes::ScanRS) - 797 average_time_ms(G1GCPhaseTimes::ObjCopy) - 798 average_time_ms(G1GCPhaseTimes::Termination); 799 } 800 801 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const { 802 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms(); 803 } 804 805 CollectionSetChooser* G1CollectorPolicy::cset_chooser() const { 806 return _collection_set->cset_chooser(); 807 } 808 809 bool G1CollectorPolicy::about_to_start_mixed_phase() const { 810 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc(); 811 } 812 813 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { 814 if (about_to_start_mixed_phase()) { 815 return false; 816 } 817 818 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); 819 820 size_t cur_used_bytes = _g1->non_young_capacity_bytes(); 821 size_t alloc_byte_size = alloc_word_size * HeapWordSize; 822 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; 823 824 bool result = false; 825 if (marking_request_bytes > marking_initiating_used_threshold) { 826 result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc(); 827 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s", 828 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", 829 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); 830 } 831 832 return result; 833 } 834 835 // Anything below that is considered to be zero 836 #define MIN_TIMER_GRANULARITY 0.0000001 837 838 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { 839 double end_time_sec = os::elapsedTime(); 840 841 size_t cur_used_bytes = _g1->used(); 842 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 843 bool last_pause_included_initial_mark = false; 844 bool update_stats = !_g1->evacuation_failed(); 845 846 NOT_PRODUCT(_short_lived_surv_rate_group->print()); 847 848 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 849 850 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); 851 if (last_pause_included_initial_mark) { 852 record_concurrent_mark_init_end(0.0); 853 } else { 854 maybe_start_marking(); 855 } 856 857 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 858 if (app_time_ms < MIN_TIMER_GRANULARITY) { 859 // This usually happens due to the timer not having the required 860 // granularity. Some Linuxes are the usual culprits. 861 // We'll just set it to something (arbitrarily) small. 862 app_time_ms = 1.0; 863 } 864 865 if (update_stats) { 866 // We maintain the invariant that all objects allocated by mutator 867 // threads will be allocated out of eden regions. So, we can use 868 // the eden region number allocated since the previous GC to 869 // calculate the application's allocate rate. The only exception 870 // to that is humongous objects that are allocated separately. But 871 // given that humongous object allocations do not really affect 872 // either the pause's duration nor when the next pause will take 873 // place we can safely ignore them here. 874 uint regions_allocated = _collection_set->eden_region_length(); 875 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 876 _alloc_rate_ms_seq->add(alloc_rate_ms); 877 878 double interval_ms = 879 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 880 update_recent_gc_times(end_time_sec, pause_time_ms); 881 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 882 if (recent_avg_pause_time_ratio() < 0.0 || 883 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 884 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 885 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 886 if (_recent_avg_pause_time_ratio < 0.0) { 887 _recent_avg_pause_time_ratio = 0.0; 888 } else { 889 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 890 _recent_avg_pause_time_ratio = 1.0; 891 } 892 } 893 894 // Compute the ratio of just this last pause time to the entire time range stored 895 // in the vectors. Comparing this pause to the entire range, rather than only the 896 // most recent interval, has the effect of smoothing over a possible transient 'burst' 897 // of more frequent pauses that don't really reflect a change in heap occupancy. 898 // This reduces the likelihood of a needless heap expansion being triggered. 899 _last_pause_time_ratio = 900 (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; 901 } 902 903 bool new_in_marking_window = collector_state()->in_marking_window(); 904 bool new_in_marking_window_im = false; 905 if (last_pause_included_initial_mark) { 906 new_in_marking_window = true; 907 new_in_marking_window_im = true; 908 } 909 910 if (collector_state()->last_young_gc()) { 911 // This is supposed to to be the "last young GC" before we start 912 // doing mixed GCs. Here we decide whether to start mixed GCs or not. 913 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC"); 914 915 if (next_gc_should_be_mixed("start mixed GCs", 916 "do not start mixed GCs")) { 917 collector_state()->set_gcs_are_young(false); 918 } else { 919 // We aborted the mixed GC phase early. 920 abort_time_to_mixed_tracking(); 921 } 922 923 collector_state()->set_last_young_gc(false); 924 } 925 926 if (!collector_state()->last_gc_was_young()) { 927 // This is a mixed GC. Here we decide whether to continue doing 928 // mixed GCs or not. 929 if (!next_gc_should_be_mixed("continue mixed GCs", 930 "do not continue mixed GCs")) { 931 collector_state()->set_gcs_are_young(true); 932 933 maybe_start_marking(); 934 } 935 } 936 937 _short_lived_surv_rate_group->start_adding_regions(); 938 // Do that for any other surv rate groups 939 940 double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0; 941 942 if (update_stats) { 943 double cost_per_card_ms = 0.0; 944 if (_pending_cards > 0) { 945 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards; 946 _cost_per_card_ms_seq->add(cost_per_card_ms); 947 } 948 _cost_scan_hcc_seq->add(scan_hcc_time_ms); 949 950 double cost_per_entry_ms = 0.0; 951 if (cards_scanned > 10) { 952 cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; 953 if (collector_state()->last_gc_was_young()) { 954 _cost_per_entry_ms_seq->add(cost_per_entry_ms); 955 } else { 956 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); 957 } 958 } 959 960 if (_max_rs_lengths > 0) { 961 double cards_per_entry_ratio = 962 (double) cards_scanned / (double) _max_rs_lengths; 963 if (collector_state()->last_gc_was_young()) { 964 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 965 } else { 966 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 967 } 968 } 969 970 // This is defensive. For a while _max_rs_lengths could get 971 // smaller than _recorded_rs_lengths which was causing 972 // rs_length_diff to get very large and mess up the RSet length 973 // predictions. The reason was unsafe concurrent updates to the 974 // _inc_cset_recorded_rs_lengths field which the code below guards 975 // against (see CR 7118202). This bug has now been fixed (see CR 976 // 7119027). However, I'm still worried that 977 // _inc_cset_recorded_rs_lengths might still end up somewhat 978 // inaccurate. The concurrent refinement thread calculates an 979 // RSet's length concurrently with other CR threads updating it 980 // which might cause it to calculate the length incorrectly (if, 981 // say, it's in mid-coarsening). So I'll leave in the defensive 982 // conditional below just in case. 983 size_t rs_length_diff = 0; 984 size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths(); 985 if (_max_rs_lengths > recorded_rs_lengths) { 986 rs_length_diff = _max_rs_lengths - recorded_rs_lengths; 987 } 988 _rs_length_diff_seq->add((double) rs_length_diff); 989 990 size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes; 991 size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes; 992 double cost_per_byte_ms = 0.0; 993 994 if (copied_bytes > 0) { 995 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes; 996 if (collector_state()->in_marking_window()) { 997 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); 998 } else { 999 _cost_per_byte_ms_seq->add(cost_per_byte_ms); 1000 } 1001 } 1002 1003 if (_collection_set->young_region_length() > 0) { 1004 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() / 1005 _collection_set->young_region_length()); 1006 } 1007 1008 if (_collection_set->old_region_length() > 0) { 1009 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() / 1010 _collection_set->old_region_length()); 1011 } 1012 1013 _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms)); 1014 1015 _pending_cards_seq->add((double) _pending_cards); 1016 _rs_lengths_seq->add((double) _max_rs_lengths); 1017 } 1018 1019 collector_state()->set_in_marking_window(new_in_marking_window); 1020 collector_state()->set_in_marking_window_im(new_in_marking_window_im); 1021 _free_regions_at_end_of_collection = _g1->num_free_regions(); 1022 // IHOP control wants to know the expected young gen length if it were not 1023 // restrained by the heap reserve. Using the actual length would make the 1024 // prediction too small and the limit the young gen every time we get to the 1025 // predicted target occupancy. 1026 size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); 1027 update_rs_lengths_prediction(); 1028 1029 update_ihop_prediction(app_time_ms / 1000.0, 1030 _bytes_allocated_in_old_since_last_gc, 1031 last_unrestrained_young_length * HeapRegion::GrainBytes); 1032 _bytes_allocated_in_old_since_last_gc = 0; 1033 1034 _ihop_control->send_trace_event(_g1->gc_tracer_stw()); 1035 1036 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 1037 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 1038 1039 if (update_rs_time_goal_ms < scan_hcc_time_ms) { 1040 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)." 1041 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms", 1042 update_rs_time_goal_ms, scan_hcc_time_ms); 1043 1044 update_rs_time_goal_ms = 0; 1045 } else { 1046 update_rs_time_goal_ms -= scan_hcc_time_ms; 1047 } 1048 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, 1049 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), 1050 update_rs_time_goal_ms); 1051 1052 cset_chooser()->verify(); 1053 } 1054 1055 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { 1056 if (G1UseAdaptiveIHOP) { 1057 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, 1058 &_predictor, 1059 G1ReservePercent, 1060 G1HeapWastePercent); 1061 } else { 1062 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); 1063 } 1064 } 1065 1066 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s, 1067 size_t mutator_alloc_bytes, 1068 size_t young_gen_size) { 1069 // Always try to update IHOP prediction. Even evacuation failures give information 1070 // about e.g. whether to start IHOP earlier next time. 1071 1072 // Avoid using really small application times that might create samples with 1073 // very high or very low values. They may be caused by e.g. back-to-back gcs. 1074 double const min_valid_time = 1e-6; 1075 1076 bool report = false; 1077 1078 double marking_to_mixed_time = -1.0; 1079 if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) { 1080 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); 1081 assert(marking_to_mixed_time > 0.0, 1082 "Initial mark to mixed time must be larger than zero but is %.3f", 1083 marking_to_mixed_time); 1084 if (marking_to_mixed_time > min_valid_time) { 1085 _ihop_control->update_marking_length(marking_to_mixed_time); 1086 report = true; 1087 } 1088 } 1089 1090 // As an approximation for the young gc promotion rates during marking we use 1091 // all of them. In many applications there are only a few if any young gcs during 1092 // marking, which makes any prediction useless. This increases the accuracy of the 1093 // prediction. 1094 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { 1095 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); 1096 report = true; 1097 } 1098 1099 if (report) { 1100 report_ihop_statistics(); 1101 } 1102 } 1103 1104 void G1CollectorPolicy::report_ihop_statistics() { 1105 _ihop_control->print(); 1106 } 1107 1108 void G1CollectorPolicy::print_phases() { 1109 phase_times()->print(); 1110 } 1111 1112 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1113 double update_rs_processed_buffers, 1114 double goal_ms) { 1115 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1116 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1117 1118 if (G1UseAdaptiveConcRefinement) { 1119 const int k_gy = 3, k_gr = 6; 1120 const double inc_k = 1.1, dec_k = 0.9; 1121 1122 size_t g = cg1r->green_zone(); 1123 if (update_rs_time > goal_ms) { 1124 g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 1125 } else { 1126 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { 1127 g = (size_t)MAX2(g * inc_k, g + 1.0); 1128 } 1129 } 1130 // Change the refinement threads params 1131 cg1r->set_green_zone(g); 1132 cg1r->set_yellow_zone(g * k_gy); 1133 cg1r->set_red_zone(g * k_gr); 1134 cg1r->reinitialize_threads(); 1135 1136 size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1); 1137 size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, 1138 cg1r->yellow_zone()); 1139 // Change the barrier params 1140 dcqs.set_process_completed_threshold((int)processing_threshold); 1141 dcqs.set_max_completed_queue((int)cg1r->red_zone()); 1142 } 1143 1144 size_t curr_queue_size = dcqs.completed_buffers_num(); 1145 if (curr_queue_size >= cg1r->yellow_zone()) { 1146 dcqs.set_completed_queue_padding(curr_queue_size); 1147 } else { 1148 dcqs.set_completed_queue_padding(0); 1149 } 1150 dcqs.notify_if_necessary(); 1151 } 1152 1153 size_t G1CollectorPolicy::predict_rs_length_diff() const { 1154 return get_new_size_prediction(_rs_length_diff_seq); 1155 } 1156 1157 double G1CollectorPolicy::predict_alloc_rate_ms() const { 1158 return get_new_prediction(_alloc_rate_ms_seq); 1159 } 1160 1161 double G1CollectorPolicy::predict_cost_per_card_ms() const { 1162 return get_new_prediction(_cost_per_card_ms_seq); 1163 } 1164 1165 double G1CollectorPolicy::predict_scan_hcc_ms() const { 1166 return get_new_prediction(_cost_scan_hcc_seq); 1167 } 1168 1169 double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const { 1170 return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms(); 1171 } 1172 1173 double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const { 1174 return get_new_prediction(_young_cards_per_entry_ratio_seq); 1175 } 1176 1177 double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const { 1178 if (_mixed_cards_per_entry_ratio_seq->num() < 2) { 1179 return predict_young_cards_per_entry_ratio(); 1180 } else { 1181 return get_new_prediction(_mixed_cards_per_entry_ratio_seq); 1182 } 1183 } 1184 1185 size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const { 1186 return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); 1187 } 1188 1189 size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const { 1190 return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio()); 1191 } 1192 1193 double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const { 1194 if (collector_state()->gcs_are_young()) { 1195 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1196 } else { 1197 return predict_mixed_rs_scan_time_ms(card_num); 1198 } 1199 } 1200 1201 double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const { 1202 if (_mixed_cost_per_entry_ms_seq->num() < 3) { 1203 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1204 } else { 1205 return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq); 1206 } 1207 } 1208 1209 double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { 1210 if (_cost_per_byte_ms_during_cm_seq->num() < 3) { 1211 return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq); 1212 } else { 1213 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq); 1214 } 1215 } 1216 1217 double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const { 1218 if (collector_state()->during_concurrent_mark()) { 1219 return predict_object_copy_time_ms_during_cm(bytes_to_copy); 1220 } else { 1221 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); 1222 } 1223 } 1224 1225 double G1CollectorPolicy::predict_constant_other_time_ms() const { 1226 return get_new_prediction(_constant_other_time_ms_seq); 1227 } 1228 1229 double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const { 1230 return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); 1231 } 1232 1233 double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const { 1234 return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); 1235 } 1236 1237 double G1CollectorPolicy::predict_remark_time_ms() const { 1238 return get_new_prediction(_concurrent_mark_remark_times_ms); 1239 } 1240 1241 double G1CollectorPolicy::predict_cleanup_time_ms() const { 1242 return get_new_prediction(_concurrent_mark_cleanup_times_ms); 1243 } 1244 1245 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { 1246 TruncatedSeq* seq = surv_rate_group->get_seq(age); 1247 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age); 1248 double pred = get_new_prediction(seq); 1249 if (pred > 1.0) { 1250 pred = 1.0; 1251 } 1252 return pred; 1253 } 1254 1255 double G1CollectorPolicy::predict_yg_surv_rate(int age) const { 1256 return predict_yg_surv_rate(age, _short_lived_surv_rate_group); 1257 } 1258 1259 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const { 1260 return _short_lived_surv_rate_group->accum_surv_rate_pred(age); 1261 } 1262 1263 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1264 size_t scanned_cards) const { 1265 return 1266 predict_rs_update_time_ms(pending_cards) + 1267 predict_rs_scan_time_ms(scanned_cards) + 1268 predict_constant_other_time_ms(); 1269 } 1270 1271 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const { 1272 size_t rs_length = predict_rs_length_diff(); 1273 size_t card_num; 1274 if (collector_state()->gcs_are_young()) { 1275 card_num = predict_young_card_num(rs_length); 1276 } else { 1277 card_num = predict_non_young_card_num(rs_length); 1278 } 1279 return predict_base_elapsed_time_ms(pending_cards, card_num); 1280 } 1281 1282 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const { 1283 size_t bytes_to_copy; 1284 if (hr->is_marked()) 1285 bytes_to_copy = hr->max_live_bytes(); 1286 else { 1287 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); 1288 int age = hr->age_in_surv_rate_group(); 1289 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1290 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); 1291 } 1292 return bytes_to_copy; 1293 } 1294 1295 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1296 bool for_young_gc) const { 1297 size_t rs_length = hr->rem_set()->occupied(); 1298 size_t card_num; 1299 1300 // Predicting the number of cards is based on which type of GC 1301 // we're predicting for. 1302 if (for_young_gc) { 1303 card_num = predict_young_card_num(rs_length); 1304 } else { 1305 card_num = predict_non_young_card_num(rs_length); 1306 } 1307 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1308 1309 double region_elapsed_time_ms = 1310 predict_rs_scan_time_ms(card_num) + 1311 predict_object_copy_time_ms(bytes_to_copy); 1312 1313 // The prediction of the "other" time for this region is based 1314 // upon the region type and NOT the GC type. 1315 if (hr->is_young()) { 1316 region_elapsed_time_ms += predict_young_other_time_ms(1); 1317 } else { 1318 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1319 } 1320 return region_elapsed_time_ms; 1321 } 1322 1323 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1324 double elapsed_ms) { 1325 _recent_gc_times_ms->add(elapsed_ms); 1326 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1327 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1328 } 1329 1330 void G1CollectorPolicy::clear_ratio_check_data() { 1331 _ratio_over_threshold_count = 0; 1332 _ratio_over_threshold_sum = 0.0; 1333 _pauses_since_start = 0; 1334 } 1335 1336 size_t G1CollectorPolicy::expansion_amount() { 1337 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; 1338 double last_gc_overhead = _last_pause_time_ratio * 100.0; 1339 double threshold = _gc_overhead_perc; 1340 size_t expand_bytes = 0; 1341 1342 // If the heap is at less than half its maximum size, scale the threshold down, 1343 // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand, 1344 // though the scaling code will likely keep the increase small. 1345 if (_g1->capacity() <= _g1->max_capacity() / 2) { 1346 threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); 1347 threshold = MAX2(threshold, 1.0); 1348 } 1349 1350 // If the last GC time ratio is over the threshold, increment the count of 1351 // times it has been exceeded, and add this ratio to the sum of exceeded 1352 // ratios. 1353 if (last_gc_overhead > threshold) { 1354 _ratio_over_threshold_count++; 1355 _ratio_over_threshold_sum += last_gc_overhead; 1356 } 1357 1358 // Check if we've had enough GC time ratio checks that were over the 1359 // threshold to trigger an expansion. We'll also expand if we've 1360 // reached the end of the history buffer and the average of all entries 1361 // is still over the threshold. This indicates a smaller number of GCs were 1362 // long enough to make the average exceed the threshold. 1363 bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics; 1364 if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || 1365 (filled_history_buffer && (recent_gc_overhead > threshold))) { 1366 size_t min_expand_bytes = HeapRegion::GrainBytes; 1367 size_t reserved_bytes = _g1->max_capacity(); 1368 size_t committed_bytes = _g1->capacity(); 1369 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1370 size_t expand_bytes_via_pct = 1371 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1372 double scale_factor = 1.0; 1373 1374 // If the current size is less than 1/4 of the Initial heap size, expand 1375 // by half of the delta between the current and Initial sizes. IE, grow 1376 // back quickly. 1377 // 1378 // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of 1379 // the available expansion space, whichever is smaller, as the base 1380 // expansion size. Then possibly scale this size according to how much the 1381 // threshold has (on average) been exceeded by. If the delta is small 1382 // (less than the StartScaleDownAt value), scale the size down linearly, but 1383 // not by less than MinScaleDownFactor. If the delta is large (greater than 1384 // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor 1385 // times the base size. The scaling will be linear in the range from 1386 // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, 1387 // ScaleUpRange sets the rate of scaling up. 1388 if (committed_bytes < InitialHeapSize / 4) { 1389 expand_bytes = (InitialHeapSize - committed_bytes) / 2; 1390 } else { 1391 double const MinScaleDownFactor = 0.2; 1392 double const MaxScaleUpFactor = 2; 1393 double const StartScaleDownAt = _gc_overhead_perc; 1394 double const StartScaleUpAt = _gc_overhead_perc * 1.5; 1395 double const ScaleUpRange = _gc_overhead_perc * 2.0; 1396 1397 double ratio_delta; 1398 if (filled_history_buffer) { 1399 ratio_delta = recent_gc_overhead - threshold; 1400 } else { 1401 ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; 1402 } 1403 1404 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1405 if (ratio_delta < StartScaleDownAt) { 1406 scale_factor = ratio_delta / StartScaleDownAt; 1407 scale_factor = MAX2(scale_factor, MinScaleDownFactor); 1408 } else if (ratio_delta > StartScaleUpAt) { 1409 scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); 1410 scale_factor = MIN2(scale_factor, MaxScaleUpFactor); 1411 } 1412 } 1413 1414 log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " 1415 "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", 1416 recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); 1417 1418 expand_bytes = static_cast<size_t>(expand_bytes * scale_factor); 1419 1420 // Ensure the expansion size is at least the minimum growth amount 1421 // and at most the remaining uncommitted byte size. 1422 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1423 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1424 1425 clear_ratio_check_data(); 1426 } else { 1427 // An expansion was not triggered. If we've started counting, increment 1428 // the number of checks we've made in the current window. If we've 1429 // reached the end of the window without resizing, clear the counters to 1430 // start again the next time we see a ratio above the threshold. 1431 if (_ratio_over_threshold_count > 0) { 1432 _pauses_since_start++; 1433 if (_pauses_since_start > NumPrevPausesForHeuristics) { 1434 clear_ratio_check_data(); 1435 } 1436 } 1437 } 1438 1439 return expand_bytes; 1440 } 1441 1442 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1443 #ifndef PRODUCT 1444 _short_lived_surv_rate_group->print_surv_rate_summary(); 1445 // add this call for any other surv rate groups 1446 #endif // PRODUCT 1447 } 1448 1449 bool G1CollectorPolicy::is_young_list_full() const { 1450 uint young_list_length = _g1->young_list()->length(); 1451 uint young_list_target_length = _young_list_target_length; 1452 return young_list_length >= young_list_target_length; 1453 } 1454 1455 bool G1CollectorPolicy::can_expand_young_list() const { 1456 uint young_list_length = _g1->young_list()->length(); 1457 uint young_list_max_length = _young_list_max_length; 1458 return young_list_length < young_list_max_length; 1459 } 1460 1461 bool G1CollectorPolicy::adaptive_young_list_length() const { 1462 return _young_gen_sizer->adaptive_young_list_length(); 1463 } 1464 1465 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1466 uint expansion_region_num = 0; 1467 if (GCLockerEdenExpansionPercent > 0) { 1468 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1469 double expansion_region_num_d = perc * (double) _young_list_target_length; 1470 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1471 // less than 1.0) we'll get 1. 1472 expansion_region_num = (uint) ceil(expansion_region_num_d); 1473 } else { 1474 assert(expansion_region_num == 0, "sanity"); 1475 } 1476 _young_list_max_length = _young_list_target_length + expansion_region_num; 1477 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1478 } 1479 1480 // Calculates survivor space parameters. 1481 void G1CollectorPolicy::update_survivors_policy() { 1482 double max_survivor_regions_d = 1483 (double) _young_list_target_length / (double) SurvivorRatio; 1484 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1485 // smaller than 1.0) we'll get 1. 1486 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1487 1488 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1489 HeapRegion::GrainWords * _max_survivor_regions, counters()); 1490 } 1491 1492 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { 1493 // We actually check whether we are marking here and not if we are in a 1494 // reclamation phase. This means that we will schedule a concurrent mark 1495 // even while we are still in the process of reclaiming memory. 1496 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1497 if (!during_cycle) { 1498 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause)); 1499 collector_state()->set_initiate_conc_mark_if_possible(true); 1500 return true; 1501 } else { 1502 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause)); 1503 return false; 1504 } 1505 } 1506 1507 void G1CollectorPolicy::initiate_conc_mark() { 1508 collector_state()->set_during_initial_mark_pause(true); 1509 collector_state()->set_initiate_conc_mark_if_possible(false); 1510 } 1511 1512 void G1CollectorPolicy::decide_on_conc_mark_initiation() { 1513 // We are about to decide on whether this pause will be an 1514 // initial-mark pause. 1515 1516 // First, collector_state()->during_initial_mark_pause() should not be already set. We 1517 // will set it here if we have to. However, it should be cleared by 1518 // the end of the pause (it's only set for the duration of an 1519 // initial-mark pause). 1520 assert(!collector_state()->during_initial_mark_pause(), "pre-condition"); 1521 1522 if (collector_state()->initiate_conc_mark_if_possible()) { 1523 // We had noticed on a previous pause that the heap occupancy has 1524 // gone over the initiating threshold and we should start a 1525 // concurrent marking cycle. So we might initiate one. 1526 1527 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) { 1528 // Initiate a new initial mark if there is no marking or reclamation going on. 1529 initiate_conc_mark(); 1530 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); 1531 } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) { 1532 // Initiate a user requested initial mark. An initial mark must be young only 1533 // GC, so the collector state must be updated to reflect this. 1534 collector_state()->set_gcs_are_young(true); 1535 collector_state()->set_last_young_gc(false); 1536 1537 abort_time_to_mixed_tracking(); 1538 initiate_conc_mark(); 1539 log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)"); 1540 } else { 1541 // The concurrent marking thread is still finishing up the 1542 // previous cycle. If we start one right now the two cycles 1543 // overlap. In particular, the concurrent marking thread might 1544 // be in the process of clearing the next marking bitmap (which 1545 // we will use for the next cycle if we start one). Starting a 1546 // cycle now will be bad given that parts of the marking 1547 // information might get cleared by the marking thread. And we 1548 // cannot wait for the marking thread to finish the cycle as it 1549 // periodically yields while clearing the next marking bitmap 1550 // and, if it's in a yield point, it's waiting for us to 1551 // finish. So, at this point we will not start a cycle and we'll 1552 // let the concurrent marking thread complete the last one. 1553 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)"); 1554 } 1555 } 1556 } 1557 1558 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1559 G1CollectedHeap* _g1h; 1560 CSetChooserParUpdater _cset_updater; 1561 1562 public: 1563 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1564 uint chunk_size) : 1565 _g1h(G1CollectedHeap::heap()), 1566 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1567 1568 bool doHeapRegion(HeapRegion* r) { 1569 // Do we have any marking information for this region? 1570 if (r->is_marked()) { 1571 // We will skip any region that's currently used as an old GC 1572 // alloc region (we should not consider those for collection 1573 // before we fill them up). 1574 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1575 _cset_updater.add_region(r); 1576 } 1577 } 1578 return false; 1579 } 1580 }; 1581 1582 class ParKnownGarbageTask: public AbstractGangTask { 1583 CollectionSetChooser* _hrSorted; 1584 uint _chunk_size; 1585 G1CollectedHeap* _g1; 1586 HeapRegionClaimer _hrclaimer; 1587 1588 public: 1589 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : 1590 AbstractGangTask("ParKnownGarbageTask"), 1591 _hrSorted(hrSorted), _chunk_size(chunk_size), 1592 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} 1593 1594 void work(uint worker_id) { 1595 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1596 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); 1597 } 1598 }; 1599 1600 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { 1601 assert(n_workers > 0, "Active gc workers should be greater than 0"); 1602 const uint overpartition_factor = 4; 1603 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); 1604 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); 1605 } 1606 1607 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { 1608 cset_chooser()->clear(); 1609 1610 WorkGang* workers = _g1->workers(); 1611 uint n_workers = workers->active_workers(); 1612 1613 uint n_regions = _g1->num_regions(); 1614 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); 1615 cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); 1616 ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers); 1617 workers->run_task(&par_known_garbage_task); 1618 1619 cset_chooser()->sort_regions(); 1620 1621 double end_sec = os::elapsedTime(); 1622 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1623 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1624 _prev_collection_pause_end_ms += elapsed_time_ms; 1625 1626 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); 1627 } 1628 1629 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const { 1630 // Returns the given amount of reclaimable bytes (that represents 1631 // the amount of reclaimable space still to be collected) as a 1632 // percentage of the current heap capacity. 1633 size_t capacity_bytes = _g1->capacity(); 1634 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 1635 } 1636 1637 void G1CollectorPolicy::maybe_start_marking() { 1638 if (need_to_start_conc_mark("end of GC")) { 1639 // Note: this might have already been set, if during the last 1640 // pause we decided to start a cycle but at the beginning of 1641 // this pause we decided to postpone it. That's OK. 1642 collector_state()->set_initiate_conc_mark_if_possible(true); 1643 } 1644 } 1645 1646 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const { 1647 assert(!collector_state()->full_collection(), "must be"); 1648 if (collector_state()->during_initial_mark_pause()) { 1649 assert(collector_state()->last_gc_was_young(), "must be"); 1650 assert(!collector_state()->last_young_gc(), "must be"); 1651 return InitialMarkGC; 1652 } else if (collector_state()->last_young_gc()) { 1653 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1654 assert(collector_state()->last_gc_was_young(), "must be"); 1655 return LastYoungGC; 1656 } else if (!collector_state()->last_gc_was_young()) { 1657 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1658 assert(!collector_state()->last_young_gc(), "must be"); 1659 return MixedGC; 1660 } else { 1661 assert(collector_state()->last_gc_was_young(), "must be"); 1662 assert(!collector_state()->during_initial_mark_pause(), "must be"); 1663 assert(!collector_state()->last_young_gc(), "must be"); 1664 return YoungOnlyGC; 1665 } 1666 } 1667 1668 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) { 1669 // Manage the MMU tracker. For some reason it ignores Full GCs. 1670 if (kind != FullGC) { 1671 _mmu_tracker->add_pause(start, end); 1672 } 1673 // Manage the mutator time tracking from initial mark to first mixed gc. 1674 switch (kind) { 1675 case FullGC: 1676 abort_time_to_mixed_tracking(); 1677 break; 1678 case Cleanup: 1679 case Remark: 1680 case YoungOnlyGC: 1681 case LastYoungGC: 1682 _initial_mark_to_mixed.add_pause(end - start); 1683 break; 1684 case InitialMarkGC: 1685 _initial_mark_to_mixed.record_initial_mark_end(end); 1686 break; 1687 case MixedGC: 1688 _initial_mark_to_mixed.record_mixed_gc_start(start); 1689 break; 1690 default: 1691 ShouldNotReachHere(); 1692 } 1693 } 1694 1695 void G1CollectorPolicy::abort_time_to_mixed_tracking() { 1696 _initial_mark_to_mixed.reset(); 1697 } 1698 1699 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 1700 const char* false_action_str) const { 1701 if (cset_chooser()->is_empty()) { 1702 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); 1703 return false; 1704 } 1705 1706 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 1707 size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); 1708 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 1709 double threshold = (double) G1HeapWastePercent; 1710 if (reclaimable_perc <= threshold) { 1711 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1712 false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 1713 return false; 1714 } 1715 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1716 true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); 1717 return true; 1718 } 1719 1720 uint G1CollectorPolicy::calc_min_old_cset_length() const { 1721 // The min old CSet region bound is based on the maximum desired 1722 // number of mixed GCs after a cycle. I.e., even if some old regions 1723 // look expensive, we should add them to the CSet anyway to make 1724 // sure we go through the available old regions in no more than the 1725 // maximum desired number of mixed GCs. 1726 // 1727 // The calculation is based on the number of marked regions we added 1728 // to the CSet chooser in the first place, not how many remain, so 1729 // that the result is the same during all mixed GCs that follow a cycle. 1730 1731 const size_t region_num = (size_t) cset_chooser()->length(); 1732 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 1733 size_t result = region_num / gc_num; 1734 // emulate ceiling 1735 if (result * gc_num < region_num) { 1736 result += 1; 1737 } 1738 return (uint) result; 1739 } 1740 1741 uint G1CollectorPolicy::calc_max_old_cset_length() const { 1742 // The max old CSet region bound is based on the threshold expressed 1743 // as a percentage of the heap size. I.e., it should bound the 1744 // number of old regions added to the CSet irrespective of how many 1745 // of them are available. 1746 1747 const G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1748 const size_t region_num = g1h->num_regions(); 1749 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 1750 size_t result = region_num * perc / 100; 1751 // emulate ceiling 1752 if (100 * result < region_num * perc) { 1753 result += 1; 1754 } 1755 return (uint) result; 1756 } 1757 1758 void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) { 1759 double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms); 1760 _collection_set->finalize_old_part(time_remaining_ms); 1761 } 1762