/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

G1CollectorPolicy::G1CollectorPolicy() :
  _predictor(G1ConfidencePercent / 100.0),
  _analytics(new G1Analytics(&_predictor)),
  _pause_time_target_ms((double) MaxGCPauseMillis),
  _rs_lengths_prediction(0),
  _max_survivor_regions(0),
  _survivors_age_table(true),
  _gc_overhead_perc(0.0),

  _bytes_allocated_in_old_since_last_gc(0),
  _ihop_control(NULL),
  _initial_mark_to_mixed() {

  // The SurvRateGroups below must be initialized after the predictor because
  // they indirectly use it through the predictor pointer passed to their
  // constructor.
  _short_lived_surv_rate_group =
    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  _survivor_surv_rate_group =
    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  clear_ratio_check_data();

  _phase_times = new G1GCPhaseTimes(ParallelGCThreads);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  _tenuring_threshold = MaxTenuringThreshold;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to " UINTX_FORMAT, reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  _ihop_control = create_ihop_control();
}
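
// Illustrative sketch (not part of the policy): the net effect of the flag
// handling in the constructor above, shown as standalone logic. The names are
// hypothetical; the real code operates on the globals via FLAG_IS_DEFAULT /
// FLAG_SET_DEFAULT.
#if 0
static void example_pause_flag_defaulting(bool pause_set, bool interval_set,
                                          uintx& pause_ms, uintx& interval_ms) {
  if (!pause_set) {
    guarantee(!interval_set, "interval may not be set without the pause target");
    pause_ms = 200;             // G1's default pause time target
  }
  if (!interval_set) {
    interval_ms = pause_ms + 1; // maximize the pause target w.r.t. the interval
  }
  guarantee(pause_ms < interval_ms, "invariant: pause time target < pause interval");
}
#endif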

G1CollectorPolicy::~G1CollectorPolicy() {
  delete _ihop_control;
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }

void G1CollectorPolicy::post_heap_initialize() {
  uintx max_regions = G1CollectedHeap::heap()->max_regions();
  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
  if (max_young_size != MaxNewSize) {
    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
  }
}

void G1CollectorPolicy::initialize_flags() {
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();
  _collection_set = _g1->collection_set();
  _collection_set->set_policy(this);

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  _collection_set->start_incremental_building();
}

void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
  phase_times()->note_gc_start(num_active_workers);
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
                                                                collector_state()->during_concurrent_mark());
  double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;

  // When copying, we will likely need more bytes free than is live in the region.
  // Add some safety margin to factor in the confidence of our guess, and the
  // natural expected waste.
  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
  // of the calculation: the lower the confidence, the more headroom.
  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
  // copying due to anticipated waste in the PLABs.
  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);

  if (expected_bytes_to_copy > free_bytes) {
    // end condition 3: out-of-space
    return false;
  }

  // success!
  return true;
}
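
// Worked example for the safety factor above (illustrative only): with the
// default G1ConfidencePercent = 50 and TargetPLABWastePct = 10,
//   safety_factor = (100.0 / 50) * (100 + 10) / 100.0 = 2.0 * 1.1 = 2.2,
// i.e. a young length is only predicted to fit if at least 2.2x the predicted
// live bytes are free, absorbing both prediction error and PLAB waste.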

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);

  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_analytics->num_alloc_rate_ms() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

uint G1CollectorPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
}

uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.

  // This is how many young regions we already have (currently: the survivors).
  const uint base_min_length = _g1->young_list()->survivor_length();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.

  uint desired_max_length = calculate_young_list_desired_max_length();

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  result.second = young_list_target_length;

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > base_min_length,
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  result.first = young_list_target_length;
  return result;
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = _analytics->predict_pending_cards();
  size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
  size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
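
// Illustrative sketch of the search above in isolation: a binary search over a
// monotone predicate, returning the largest value that still satisfies it.
// fits() stands in for predict_will_fit() with the other arguments fixed.
#if 0
static uint example_max_fitting_length(uint lo_fits, uint hi_does_not_fit,
                                       bool (*fits)(uint)) {
  // Invariants: fits(lo_fits), !fits(hi_does_not_fit), lo_fits < hi_does_not_fit.
  uint diff = (hi_does_not_fit - lo_fits) / 2;
  while (diff > 0) {
    uint mid = lo_fits + diff;
    if (fits(mid)) {
      lo_fits = mid;         // mid fits: it becomes the new lower bound
    } else {
      hi_does_not_fit = mid; // mid does not fit: it becomes the new upper bound
    }
    diff = (hi_does_not_fit - lo_fits) / 2;
  }
  return lo_fits;            // largest length known to fit
}
#endif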

double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _g1->young_list()->first_survivor_region();
       r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}

void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        log_error(gc, verify)("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set->reset_bytes_used_before();
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
  // We skip the marking phase.
  if (!should_continue_with_reclaim) {
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_marking_window(false);
}

double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1CollectorPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1CollectorPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
         average_time_ms(G1GCPhaseTimes::ObjCopy) -
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

CollectionSetChooser* G1CollectorPolicy::cset_chooser() const {
  return _collection_set->cset_chooser();
}

bool G1CollectorPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)"
                                     : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold,
                              (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
  }

  return result;
}
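
// Worked example for the threshold check above (illustrative numbers): with a
// 1 GB committed heap and the static IHOP default
// InitiatingHeapOccupancyPercent = 45, get_conc_mark_start_threshold() comes
// to roughly 0.45 * 1 GB = ~460 MB. If non-young occupancy plus the pending
// allocation exceeds that value, and no marking cycle or mixed phase is in
// progress, concurrent cycle initiation is requested.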
"Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", 726 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); 727 } 728 729 return result; 730 } 731 732 // Anything below that is considered to be zero 733 #define MIN_TIMER_GRANULARITY 0.0000001 734 735 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { 736 double end_time_sec = os::elapsedTime(); 737 738 size_t cur_used_bytes = _g1->used(); 739 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 740 bool last_pause_included_initial_mark = false; 741 bool update_stats = !_g1->evacuation_failed(); 742 743 NOT_PRODUCT(_short_lived_surv_rate_group->print()); 744 745 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 746 747 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); 748 if (last_pause_included_initial_mark) { 749 record_concurrent_mark_init_end(0.0); 750 } else { 751 maybe_start_marking(); 752 } 753 754 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms()); 755 if (app_time_ms < MIN_TIMER_GRANULARITY) { 756 // This usually happens due to the timer not having the required 757 // granularity. Some Linuxes are the usual culprits. 758 // We'll just set it to something (arbitrarily) small. 759 app_time_ms = 1.0; 760 } 761 762 if (update_stats) { 763 // We maintain the invariant that all objects allocated by mutator 764 // threads will be allocated out of eden regions. So, we can use 765 // the eden region number allocated since the previous GC to 766 // calculate the application's allocate rate. The only exception 767 // to that is humongous objects that are allocated separately. But 768 // given that humongous object allocations do not really affect 769 // either the pause's duration nor when the next pause will take 770 // place we can safely ignore them here. 771 uint regions_allocated = _collection_set->eden_region_length(); 772 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 773 _analytics->report_alloc_rate_ms(alloc_rate_ms); 774 775 double interval_ms = 776 (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0; 777 _analytics->update_recent_gc_times(end_time_sec, pause_time_ms); 778 _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms); 779 } 780 781 bool new_in_marking_window = collector_state()->in_marking_window(); 782 bool new_in_marking_window_im = false; 783 if (last_pause_included_initial_mark) { 784 new_in_marking_window = true; 785 new_in_marking_window_im = true; 786 } 787 788 if (collector_state()->last_young_gc()) { 789 // This is supposed to to be the "last young GC" before we start 790 // doing mixed GCs. Here we decide whether to start mixed GCs or not. 791 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC"); 792 793 if (next_gc_should_be_mixed("start mixed GCs", 794 "do not start mixed GCs")) { 795 collector_state()->set_gcs_are_young(false); 796 } else { 797 // We aborted the mixed GC phase early. 798 abort_time_to_mixed_tracking(); 799 } 800 801 collector_state()->set_last_young_gc(false); 802 } 803 804 if (!collector_state()->last_gc_was_young()) { 805 // This is a mixed GC. 
Here we decide whether to continue doing 806 // mixed GCs or not. 807 if (!next_gc_should_be_mixed("continue mixed GCs", 808 "do not continue mixed GCs")) { 809 collector_state()->set_gcs_are_young(true); 810 811 maybe_start_marking(); 812 } 813 } 814 815 _short_lived_surv_rate_group->start_adding_regions(); 816 // Do that for any other surv rate groups 817 818 double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0; 819 820 if (update_stats) { 821 double cost_per_card_ms = 0.0; 822 if (_pending_cards > 0) { 823 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards; 824 _analytics->report_cost_per_card_ms(cost_per_card_ms); 825 } 826 _analytics->report_cost_scan_hcc(scan_hcc_time_ms); 827 828 double cost_per_entry_ms = 0.0; 829 if (cards_scanned > 10) { 830 cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; 831 _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young()); 832 } 833 834 if (_max_rs_lengths > 0) { 835 double cards_per_entry_ratio = 836 (double) cards_scanned / (double) _max_rs_lengths; 837 _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young()); 838 } 839 840 // This is defensive. For a while _max_rs_lengths could get 841 // smaller than _recorded_rs_lengths which was causing 842 // rs_length_diff to get very large and mess up the RSet length 843 // predictions. The reason was unsafe concurrent updates to the 844 // _inc_cset_recorded_rs_lengths field which the code below guards 845 // against (see CR 7118202). This bug has now been fixed (see CR 846 // 7119027). However, I'm still worried that 847 // _inc_cset_recorded_rs_lengths might still end up somewhat 848 // inaccurate. The concurrent refinement thread calculates an 849 // RSet's length concurrently with other CR threads updating it 850 // which might cause it to calculate the length incorrectly (if, 851 // say, it's in mid-coarsening). So I'll leave in the defensive 852 // conditional below just in case. 
853 size_t rs_length_diff = 0; 854 size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths(); 855 if (_max_rs_lengths > recorded_rs_lengths) { 856 rs_length_diff = _max_rs_lengths - recorded_rs_lengths; 857 } 858 _analytics->report_rs_length_diff((double) rs_length_diff); 859 860 size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes; 861 size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes; 862 double cost_per_byte_ms = 0.0; 863 864 if (copied_bytes > 0) { 865 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes; 866 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window()); 867 } 868 869 if (_collection_set->young_region_length() > 0) { 870 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() / 871 _collection_set->young_region_length()); 872 } 873 874 if (_collection_set->old_region_length() > 0) { 875 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() / 876 _collection_set->old_region_length()); 877 } 878 879 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms)); 880 881 _analytics->report_pending_cards((double) _pending_cards); 882 _analytics->report_rs_lengths((double) _max_rs_lengths); 883 } 884 885 collector_state()->set_in_marking_window(new_in_marking_window); 886 collector_state()->set_in_marking_window_im(new_in_marking_window_im); 887 _free_regions_at_end_of_collection = _g1->num_free_regions(); 888 // IHOP control wants to know the expected young gen length if it were not 889 // restrained by the heap reserve. Using the actual length would make the 890 // prediction too small and the limit the young gen every time we get to the 891 // predicted target occupancy. 892 size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); 893 update_rs_lengths_prediction(); 894 895 update_ihop_prediction(app_time_ms / 1000.0, 896 _bytes_allocated_in_old_since_last_gc, 897 last_unrestrained_young_length * HeapRegion::GrainBytes); 898 _bytes_allocated_in_old_since_last_gc = 0; 899 900 _ihop_control->send_trace_event(_g1->gc_tracer_stw()); 901 902 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 903 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 904 905 if (update_rs_time_goal_ms < scan_hcc_time_ms) { 906 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)." 907 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms", 908 update_rs_time_goal_ms, scan_hcc_time_ms); 909 910 update_rs_time_goal_ms = 0; 911 } else { 912 update_rs_time_goal_ms -= scan_hcc_time_ms; 913 } 914 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, 915 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), 916 update_rs_time_goal_ms); 917 918 cset_chooser()->verify(); 919 } 920 921 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { 922 if (G1UseAdaptiveIHOP) { 923 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, 924 &_predictor, 925 G1ReservePercent, 926 G1HeapWastePercent); 927 } else { 928 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); 929 } 930 } 931 932 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s, 933 size_t mutator_alloc_bytes, 934 size_t young_gen_size) { 935 // Always try to update IHOP prediction. 
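
// Worked example for the per-card cost above (illustrative numbers): if the
// averaged UpdateRS phase took 12.0 ms, scanning the hot card cache took
// 2.0 ms, and 50,000 cards were pending, then
//   cost_per_card_ms = (12.0 - 2.0) / 50000 = 0.0002 ms/card,
// a sample the analytics later draw on when predicting Update RS time for
// sizing future young generations.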

G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     &_predictor,
                                     G1ReservePercent,
                                     G1HeapWastePercent);
  } else {
    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
  }
}

void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
                                               size_t mutator_alloc_bytes,
                                               size_t young_gen_size) {
  // Always try to update the IHOP prediction. Even evacuation failures give
  // information about e.g. whether to start IHOP earlier next time.

  // Avoid using really small application times that might create samples with
  // very high or very low values. They may be caused by e.g. back-to-back gcs.
  double const min_valid_time = 1e-6;

  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Initial mark to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
      report = true;
    }
  }

  // As an approximation for the young gc promotion rates during marking we use
  // all of them, not just the ones observed while marking. This increases the
  // accuracy of the prediction, because in many applications there are only a
  // few, if any, young gcs during marking, which would make a marking-only
  // prediction useless.
  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}

void G1CollectorPolicy::report_ihop_statistics() {
  _ihop_control->print();
}

void G1CollectorPolicy::print_phases() {
  phase_times()->print();
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    size_t g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (size_t)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1);
    size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                       cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold((int)processing_threshold);
    dcqs.set_max_completed_queue((int)cg1r->red_zone());
  }

  size_t curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}
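
// Illustrative sketch of the zone adjustment above in isolation (names are
// hypothetical; the real code goes through the ConcurrentG1Refine accessors).
// The green zone shrinks multiplicatively when Update RS overshoots its time
// goal and grows when there is slack; yellow and red stay fixed multiples.
#if 0
static void example_adjust_zones(double update_rs_time_ms, double goal_ms,
                                 double processed_buffers,
                                 size_t& green, size_t& yellow, size_t& red) {
  if (update_rs_time_ms > goal_ms) {
    green = (size_t)(green * 0.9);                  // too slow: refine more concurrently
  } else if (processed_buffers > green) {
    green = (size_t)MAX2(green * 1.1, green + 1.0); // slack: let more buffers queue up
  }
  yellow = green * 3;  // beyond this, further refinement threads are activated
  red    = green * 6;  // beyond this, the completed-buffer queue is capped
}
#endif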

double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = _predictor.get_new_prediction(seq);
  if (pred > 1.0) {
    pred = 1.0;
  }
  return pred;
}

double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
}

double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                       size_t scanned_cards) const {
  return
    _analytics->predict_rs_update_time_ms(pending_cards) +
    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
    _analytics->predict_constant_other_time_ms();
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (hr->is_marked()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                         bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void G1CollectorPolicy::clear_ratio_check_data() {
  _ratio_over_threshold_count = 0;
  _ratio_over_threshold_sum = 0.0;
  _pauses_since_start = 0;
}
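
// Worked example for the per-region prediction above (illustrative numbers):
// for an old region with 2,000 remembered set entries, a predicted 1.5 cards
// per entry, 0.0002 ms per scanned card, 1 MB of predicted live bytes at
// 0.000001 ms per copied byte, and 0.05 ms of non-young "other" time:
//   3000 * 0.0002 + 1048576 * 0.000001 + 0.05 = 0.6 + 1.05 + 0.05 ~= 1.7 ms.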
1101 if (_g1->capacity() <= _g1->max_capacity() / 2) { 1102 threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); 1103 threshold = MAX2(threshold, 1.0); 1104 } 1105 1106 // If the last GC time ratio is over the threshold, increment the count of 1107 // times it has been exceeded, and add this ratio to the sum of exceeded 1108 // ratios. 1109 if (last_gc_overhead > threshold) { 1110 _ratio_over_threshold_count++; 1111 _ratio_over_threshold_sum += last_gc_overhead; 1112 } 1113 1114 // Check if we've had enough GC time ratio checks that were over the 1115 // threshold to trigger an expansion. We'll also expand if we've 1116 // reached the end of the history buffer and the average of all entries 1117 // is still over the threshold. This indicates a smaller number of GCs were 1118 // long enough to make the average exceed the threshold. 1119 bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics; 1120 if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || 1121 (filled_history_buffer && (recent_gc_overhead > threshold))) { 1122 size_t min_expand_bytes = HeapRegion::GrainBytes; 1123 size_t reserved_bytes = _g1->max_capacity(); 1124 size_t committed_bytes = _g1->capacity(); 1125 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1126 size_t expand_bytes_via_pct = 1127 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1128 double scale_factor = 1.0; 1129 1130 // If the current size is less than 1/4 of the Initial heap size, expand 1131 // by half of the delta between the current and Initial sizes. IE, grow 1132 // back quickly. 1133 // 1134 // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of 1135 // the available expansion space, whichever is smaller, as the base 1136 // expansion size. Then possibly scale this size according to how much the 1137 // threshold has (on average) been exceeded by. If the delta is small 1138 // (less than the StartScaleDownAt value), scale the size down linearly, but 1139 // not by less than MinScaleDownFactor. If the delta is large (greater than 1140 // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor 1141 // times the base size. The scaling will be linear in the range from 1142 // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, 1143 // ScaleUpRange sets the rate of scaling up. 
1144 if (committed_bytes < InitialHeapSize / 4) { 1145 expand_bytes = (InitialHeapSize - committed_bytes) / 2; 1146 } else { 1147 double const MinScaleDownFactor = 0.2; 1148 double const MaxScaleUpFactor = 2; 1149 double const StartScaleDownAt = _gc_overhead_perc; 1150 double const StartScaleUpAt = _gc_overhead_perc * 1.5; 1151 double const ScaleUpRange = _gc_overhead_perc * 2.0; 1152 1153 double ratio_delta; 1154 if (filled_history_buffer) { 1155 ratio_delta = recent_gc_overhead - threshold; 1156 } else { 1157 ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; 1158 } 1159 1160 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1161 if (ratio_delta < StartScaleDownAt) { 1162 scale_factor = ratio_delta / StartScaleDownAt; 1163 scale_factor = MAX2(scale_factor, MinScaleDownFactor); 1164 } else if (ratio_delta > StartScaleUpAt) { 1165 scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); 1166 scale_factor = MIN2(scale_factor, MaxScaleUpFactor); 1167 } 1168 } 1169 1170 log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " 1171 "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", 1172 recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); 1173 1174 expand_bytes = static_cast<size_t>(expand_bytes * scale_factor); 1175 1176 // Ensure the expansion size is at least the minimum growth amount 1177 // and at most the remaining uncommitted byte size. 1178 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1179 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1180 1181 clear_ratio_check_data(); 1182 } else { 1183 // An expansion was not triggered. If we've started counting, increment 1184 // the number of checks we've made in the current window. If we've 1185 // reached the end of the window without resizing, clear the counters to 1186 // start again the next time we see a ratio above the threshold. 1187 if (_ratio_over_threshold_count > 0) { 1188 _pauses_since_start++; 1189 if (_pauses_since_start > NumPrevPausesForHeuristics) { 1190 clear_ratio_check_data(); 1191 } 1192 } 1193 } 1194 1195 return expand_bytes; 1196 } 1197 1198 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1199 #ifndef PRODUCT 1200 _short_lived_surv_rate_group->print_surv_rate_summary(); 1201 // add this call for any other surv rate groups 1202 #endif // PRODUCT 1203 } 1204 1205 bool G1CollectorPolicy::is_young_list_full() const { 1206 uint young_list_length = _g1->young_list()->length(); 1207 uint young_list_target_length = _young_list_target_length; 1208 return young_list_length >= young_list_target_length; 1209 } 1210 1211 bool G1CollectorPolicy::can_expand_young_list() const { 1212 uint young_list_length = _g1->young_list()->length(); 1213 uint young_list_max_length = _young_list_max_length; 1214 return young_list_length < young_list_max_length; 1215 } 1216 1217 bool G1CollectorPolicy::adaptive_young_list_length() const { 1218 return _young_gen_sizer->adaptive_young_list_length(); 1219 } 1220 1221 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1222 uint expansion_region_num = 0; 1223 if (GCLockerEdenExpansionPercent > 0) { 1224 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1225 double expansion_region_num_d = perc * (double) _young_list_target_length; 1226 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1227 // less than 1.0) we'll get 1. 
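
// Worked example for the scaling above (illustrative, assuming GCTimeRatio = 9
// so that _gc_overhead_perc = 100 * (1 / (1 + 9)) = 10%, and an unscaled
// threshold of 10): StartScaleDownAt = 10, StartScaleUpAt = 15, ScaleUpRange = 20.
// A ratio_delta of 5 scales the base expansion down to 5 / 10 = 0.5x; a
// ratio_delta between 10 and 15 leaves it unscaled; a ratio_delta of 25 scales
// it up to 1 + (25 - 15) / 20 = 1.5x, capped overall at MaxScaleUpFactor = 2x.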

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

bool G1CollectorPolicy::is_young_list_full() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length >= young_list_target_length;
}

bool G1CollectorPolicy::can_expand_young_list() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

bool G1CollectorPolicy::adaptive_young_list_length() const {
  return _young_gen_sizer->adaptive_young_list_length();
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
      HeapRegion::GrainWords * _max_survivor_regions, counters());
}
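
// Worked example for the survivor sizing above (illustrative numbers): with a
// young list target of 100 regions and the default SurvivorRatio = 8,
//   _max_survivor_regions = ceil(100 / 8) = 13,
// and the tenuring threshold is then computed against a survivor budget of
// 13 * HeapRegion::GrainWords words.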

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    return false;
  }
}

void G1CollectorPolicy::initiate_conc_mark() {
  collector_state()->set_during_initial_mark_pause(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
      // Initiate a user requested initial mark. An initial mark must be a young
      // only GC, so the collector state must be updated to reflect this.
      collector_state()->set_gcs_are_young(true);
      collector_state()->set_last_young_gc(false);

      abort_time_to_mixed_tracking();
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // would overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}

class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool doHeapRegion(HeapRegion* r) {
    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _cset_updater.add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1;
  HeapRegionClaimer _hrclaimer;

public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
      AbstractGangTask("ParKnownGarbageTask"),
      _hrSorted(hrSorted), _chunk_size(chunk_size),
      _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
    _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
  }
};

uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}
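
// Worked example for the chunk sizing above (illustrative numbers): with
// 2,048 regions and 8 workers, min_chunk_size = 2048 / 8 = 256 while the
// overpartitioned size is 2048 / (8 * 4) = 64, so MAX2 yields 256 regions per
// claimed chunk.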

void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
  cset_chooser()->clear();

  WorkGang* workers = _g1->workers();
  uint n_workers = workers->active_workers();

  uint n_regions = _g1->num_regions();
  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
  ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  cset_chooser()->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  // Returns the given amount of reclaimable bytes (i.e., the amount of
  // reclaimable space still to be collected) as a percentage of the
  // current heap capacity.
  size_t capacity_bytes = _g1->capacity();
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}

void G1CollectorPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
  assert(!collector_state()->full_collection(), "must be");
  if (collector_state()->during_initial_mark_pause()) {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return InitialMarkGC;
  } else if (collector_state()->last_young_gc()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(collector_state()->last_gc_was_young(), "must be");
    return LastYoungGC;
  } else if (!collector_state()->last_gc_was_young()) {
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return MixedGC;
  } else {
    assert(collector_state()->last_gc_was_young(), "must be");
    assert(!collector_state()->during_initial_mark_pause(), "must be");
    assert(!collector_state()->last_young_gc(), "must be");
    return YoungOnlyGC;
  }
}

void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed GC.
  switch (kind) {
    case FullGC:
      abort_time_to_mixed_tracking();
      break;
    case Cleanup:
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      _initial_mark_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
      _initial_mark_to_mixed.record_initial_mark_end(end);
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}

void G1CollectorPolicy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}

bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) const {
  if (cset_chooser()->is_empty()) {
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). "
                        "candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). "
                      "candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
  return true;
}

uint G1CollectorPolicy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) cset_chooser()->length();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1CollectorPolicy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}
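
// Worked example for calc_min_old_cset_length() and calc_max_old_cset_length()
// above. The numbers are illustrative assumptions only: with 100 marked
// candidate regions and the default G1MixedGCCountTarget of 8, the minimum
// bound is ceil(100 / 8) = 13 old regions per mixed GC; with a 2048-region
// heap and the default G1OldCSetRegionThresholdPercent of 10, the maximum
// bound is ceil(2048 * 10 / 100) = 205 old regions.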

void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
  _collection_set->finalize_old_part(time_remaining_ms);
}
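
// Note on finalize_collection_set() above: the young part is finalized
// first against the full pause budget, and whatever time it does not
// consume is handed to the old part. For illustration (assumed numbers,
// not from a real run): with a 200ms pause time target and a predicted
// young evacuation time of 150ms, roughly 50ms would remain for adding
// old regions during a mixed GC.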