/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "logging/log.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/pair.hpp"

G1Policy::G1Policy(STWGCTimer* gc_timer) :
  _predictor(G1ConfidencePercent / 100.0),
  _analytics(new G1Analytics(&_predictor)),
  _remset_tracker(),
  _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  _ihop_control(create_ihop_control(&_predictor)),
  _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  _full_collection_start_sec(0.0),
  _young_list_desired_length(0),
  _young_list_target_length(0),
  _young_list_max_length(0),
  _eden_surv_rate_group(new G1SurvRateGroup()),
  _survivor_surv_rate_group(new G1SurvRateGroup()),
  _reserve_factor((double) G1ReservePercent / 100.0),
  _reserve_regions(0),
  _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  _free_regions_at_end_of_collection(0),
  _rs_length(0),
  _rs_length_prediction(0),
  _pending_cards_at_gc_start(0),
  _old_gen_alloc_tracker(),
  _initial_mark_to_mixed(),
  _collection_set(NULL),
  _g1h(NULL),
  _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  _mark_remark_start_sec(0),
  _mark_cleanup_start_sec(0),
  _tenuring_threshold(MaxTenuringThreshold),
  _max_survivor_regions(0),
  _survivors_age_table(true)
{
}

G1Policy::~G1Policy() {
  delete _ihop_control;
  delete _young_gen_sizer;
}

G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  if (G1Arguments::is_heterogeneous_heap()) {
    return new G1HeterogeneousHeapPolicy(gc_timer_stw);
  } else {
    return new G1Policy(gc_timer_stw);
  }
}

G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }

void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  _g1h = g1h;
  _collection_set = collection_set;

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());

  _free_regions_at_end_of_collection = _g1h->num_free_regions();

  update_young_length_bounds();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  _collection_set->start_incremental_building();
}

void G1Policy::note_gc_start() {
  phase_times()->note_gc_start();
}

class G1YoungLengthPredictor {
  const double _base_time_ms;
  const double _base_free_regions;
  const double _target_pause_time_ms;
  const G1Policy* const _policy;

 public:
  G1YoungLengthPredictor(double base_time_ms,
                         double base_free_regions,
                         double target_pause_time_ms,
                         const G1Policy* policy) :
    _base_time_ms(base_time_ms),
    _base_free_regions(base_free_regions),
    _target_pause_time_ms(target_pause_time_ms),
    _policy(policy) {}

  bool will_fit(uint young_length) const {
    if (young_length >= _base_free_regions) {
      // end condition 1: not enough space for the young regions
      return false;
    }

    size_t bytes_to_copy = 0;
    const double copy_time_ms = _policy->predict_eden_copy_time_ms(young_length, &bytes_to_copy);
    const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
    const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
    if (pause_time_ms > _target_pause_time_ms) {
      // end condition 2: prediction is over the target pause time
      return false;
    }

    const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;

    // When copying, we will likely need more bytes free than is live in the region.
    // Add some safety margin to factor in the confidence of our guess, and the
    // natural expected waste.
    // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
    // of the calculation: the lower the confidence, the more headroom.
    // (100 + TargetPLABWastePct) represents the increase in expected bytes during
    // copying due to anticipated waste in the PLABs.
    const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
    const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);

    if (expected_bytes_to_copy > free_bytes) {
      // end condition 3: out-of-space
      return false;
    }

    // success!
    return true;
  }
};

void G1Policy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);

  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}

uint G1Policy::calculate_desired_eden_length_by_mmu() const {
  // One could argue that any useful eden length to keep any MMU would be 1, but
  // in theory this is possible. Other constraints enforce a minimum eden of 1
  // anyway.
  uint desired_min_length = 0;
  if (use_adaptive_young_list_length()) {
    double now_sec = os::elapsedTime();
    double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
    double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
    desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
  }
  return desired_min_length;
}

void G1Policy::update_young_length_bounds() {
  update_young_length_bounds(_analytics->predict_rs_length());
}

void G1Policy::update_young_length_bounds(size_t rs_length) {
  _young_list_desired_length = calculate_young_desired_length(rs_length);
  _young_list_target_length = calculate_young_target_length(_young_list_desired_length);
  _young_list_max_length = calculate_young_max_length(_young_list_target_length);

  log_debug(gc, ergo, heap)("Young list lengths: desired: %u, target: %u, max: %u",
                            _young_list_desired_length,
                            _young_list_target_length,
                            _young_list_max_length);
}

// Calculates desired young gen length. It is calculated from:
//
// - sizer min/max bounds on young gen
// - pause time goal for whole young gen evacuation
// - MMU goal influencing eden to make GCs spaced apart.
// - a minimum one eden region length.
//
// We may enter with already allocated eden and survivor regions that may be
// higher than the maximum, or the above goals may result in a desired value
// smaller than what is already allocated.
// The main reason is revising young length, with or without the GCLocker being
// active.
//
uint G1Policy::calculate_young_desired_length(size_t rs_length) const {
  uint min_young_length_by_sizer = _young_gen_sizer->min_desired_young_length();
  uint max_young_length_by_sizer = _young_gen_sizer->max_desired_young_length();

  assert(min_young_length_by_sizer >= 1, "invariant");
  assert(max_young_length_by_sizer >= min_young_length_by_sizer, "invariant");

  // Absolute minimum eden length.
  // Enforcing a minimum eden length helps at startup when the predictors are not
  // yet trained on the application to avoid unnecessary (but very short) full gcs
  // on very small (initial) heaps.
  uint const MinDesiredEdenLength = 1;

  // Calculate the absolute and desired min bounds first.

  // This is how many survivor regions we already have.
  const uint survivor_length = _g1h->survivor_regions_count();
  // Size of the already allocated young gen.
  const uint allocated_young_length = _g1h->young_regions_count();
  // This is the absolute minimum young length that we can return. Ensure that we
  // don't go below any user-defined minimum bound; but we might have already
  // allocated more than that for reasons. In this case, use that.
  uint absolute_min_young_length = MAX2(allocated_young_length, min_young_length_by_sizer);
  // Calculate the absolute max bounds. After evac failure or when revising the
  // young length we might have exceeded absolute min length or absolute_max_length,
  // so adjust the result accordingly.
  uint absolute_max_young_length = MAX2(max_young_length_by_sizer, absolute_min_young_length);

  uint desired_eden_length_by_mmu = 0;
  uint desired_eden_length_by_pause = 0;
  uint desired_eden_length_before_mixed = 0;

  uint desired_young_length = 0;
  if (use_adaptive_young_list_length()) {
    desired_eden_length_by_mmu = calculate_desired_eden_length_by_mmu();

    const size_t pending_cards = _analytics->predict_pending_cards();
    double survivor_base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);

    if (!next_gc_should_be_mixed(NULL, NULL)) {
      desired_eden_length_by_pause =
        calculate_desired_eden_length_by_pause(survivor_base_time_ms,
                                               absolute_min_young_length - survivor_length,
                                               absolute_max_young_length - survivor_length);
    } else {
      desired_eden_length_before_mixed =
        calculate_desired_eden_length_before_mixed(survivor_base_time_ms,
                                                   absolute_min_young_length - survivor_length,
                                                   absolute_max_young_length - survivor_length);
    }
    // Above either sets desired_eden_length_by_pause or desired_eden_length_before_mixed,
    // the other is zero. Use the one that has been set below.
    uint desired_eden_length = MAX2(desired_eden_length_by_pause,
                                    desired_eden_length_before_mixed);

    // Finally incorporate MMU concerns; assume that it overrides the pause time
    // goal, as the default value has been chosen to effectively disable it.
    // Also request at least one eden region, see above for reasons.
    desired_eden_length = MAX3(desired_eden_length,
                               desired_eden_length_by_mmu,
                               MinDesiredEdenLength);

    desired_young_length = desired_eden_length + survivor_length;
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    desired_young_length = min_young_length_by_sizer;
  }
  // Clamp to absolute min/max after we determined desired lengths.
  desired_young_length = clamp(desired_young_length, absolute_min_young_length, absolute_max_young_length);

  log_trace(gc, ergo, heap)("Young desired length %u "
                            "survivor length %u "
                            "allocated young length %u "
                            "absolute min young length %u "
                            "absolute max young length %u "
                            "desired eden length by mmu %u "
                            "desired eden length by pause %u "
                            "desired eden length before mixed %u "
                            "desired eden length by default %u",
                            desired_young_length, survivor_length,
                            allocated_young_length, absolute_min_young_length,
                            absolute_max_young_length, desired_eden_length_by_mmu,
                            desired_eden_length_by_pause,
                            desired_eden_length_before_mixed,
                            MinDesiredEdenLength);

  assert(desired_young_length >= allocated_young_length, "must be");
  return desired_young_length;
}

// Limit the desired (wished) young length by current free regions. If the request
// can be satisfied without using up reserve regions, do so, otherwise eat into
// the reserve, giving away at most what the heap sizer allows.
uint G1Policy::calculate_young_target_length(uint desired_young_length) const {
  uint allocated_young_length = _g1h->young_regions_count();

  uint receiving_additional_eden;
  if (allocated_young_length >= desired_young_length) {
    // Already used up all we actually want (may happen as G1 revises the
    // young list length concurrently, or caused by gclocker). Do not allow more,
    // potentially resulting in GC.
    receiving_additional_eden = 0;
    log_trace(gc, ergo, heap)("Young target length: Already used up desired young %u allocated %u",
                              desired_young_length,
                              allocated_young_length);
  } else {
    // Now look at how many free regions there are currently, and the heap reserve.
    // We will try our best not to "eat" into the reserve as long as we can. If we
    // do, we at most eat the sizer's minimum regions into the reserve or half the
    // reserve rounded up (if possible; this is an arbitrary value).

    uint max_to_eat_into_reserve = MIN2(_young_gen_sizer->min_desired_young_length(),
                                        (_reserve_regions + 1) / 2);

    log_trace(gc, ergo, heap)("Young target length: Common "
                              "free regions at end of collection %u "
                              "desired young length %u "
                              "reserve region %u "
                              "max to eat into reserve %u",
                              _free_regions_at_end_of_collection,
                              desired_young_length,
                              _reserve_regions,
                              max_to_eat_into_reserve);

    if (_free_regions_at_end_of_collection <= _reserve_regions) {
      // Fully eat (or already eating) into the reserve, hand back at most absolute_min_length regions.
      uint receiving_young = MIN3(_free_regions_at_end_of_collection,
                                  desired_young_length,
                                  max_to_eat_into_reserve);
      // We could already have allocated more regions than what we could get
      // above.
      receiving_additional_eden = allocated_young_length < receiving_young ?
                                  receiving_young - allocated_young_length : 0;

      log_trace(gc, ergo, heap)("Young target length: Fully eat into reserve "
                                "receiving young %u receiving additional eden %u",
                                receiving_young,
                                receiving_additional_eden);
    } else if (_free_regions_at_end_of_collection < (desired_young_length + _reserve_regions)) {
      // Partially eat into the reserve, at most max_to_eat_into_reserve regions.
      uint free_outside_reserve = _free_regions_at_end_of_collection - _reserve_regions;
      assert(free_outside_reserve < desired_young_length,
             "must be %u %u",
             free_outside_reserve, desired_young_length);

      uint receiving_within_reserve = MIN2(desired_young_length - free_outside_reserve,
                                           max_to_eat_into_reserve);
      uint receiving_young = free_outside_reserve + receiving_within_reserve;
      // Again, we could have already allocated more than we could get.
      receiving_additional_eden = allocated_young_length < receiving_young ?
                                  receiving_young - allocated_young_length : 0;

      log_trace(gc, ergo, heap)("Young target length: Partially eat into reserve "
                                "free outside reserve %u "
                                "receiving within reserve %u "
                                "receiving young %u "
                                "receiving additional eden %u",
                                free_outside_reserve, receiving_within_reserve,
                                receiving_young, receiving_additional_eden);
    } else {
      // No need to use the reserve.
      receiving_additional_eden = desired_young_length - allocated_young_length;
      log_trace(gc, ergo, heap)("Young target length: No need to use reserve "
                                "receiving additional eden %u",
                                receiving_additional_eden);
    }
  }

  uint target_young_length = allocated_young_length + receiving_additional_eden;

  assert(target_young_length >= allocated_young_length, "must be");

  log_trace(gc, ergo, heap)("Young target length: "
                            "young target length %u "
                            "allocated young length %u "
                            "received additional eden %u",
                            target_young_length, allocated_young_length,
                            receiving_additional_eden);
  return target_young_length;
}

uint G1Policy::calculate_desired_eden_length_by_pause(double base_time_ms,
                                                      uint min_eden_length,
                                                      uint max_eden_length) const {
  assert(use_adaptive_young_list_length(), "pre-condition");

  assert(min_eden_length <= max_eden_length, "must be %u %u", min_eden_length, max_eden_length);

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  G1YoungLengthPredictor p(base_time_ms,
                           _free_regions_at_end_of_collection,
                           _mmu_tracker->max_gc_time() * 1000.0,
                           this);
  if (p.will_fit(min_eden_length)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (p.will_fit(max_eden_length)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_eden_length = max_eden_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_eden_length < max_eden_length, "invariant");
      uint diff = (max_eden_length - min_eden_length) / 2;
      while (diff > 0) {
        uint eden_length = min_eden_length + diff;
        if (p.will_fit(eden_length)) {
          min_eden_length = eden_length;
        } else {
          max_eden_length = eden_length;
        }
        assert(min_eden_length < max_eden_length, "invariant");
        diff = (max_eden_length - min_eden_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_eden_length < max_eden_length,
             "otherwise we should have discovered that max_eden_length "
             "fits into the pause target and not done the binary search");
      assert(p.will_fit(min_eden_length),
             "min_eden_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!p.will_fit(min_eden_length + 1),
             "min_eden_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return min_eden_length;
}

uint G1Policy::calculate_desired_eden_length_before_mixed(double survivor_base_time_ms,
                                                          uint min_eden_length,
                                                          uint max_eden_length) const {
  G1CollectionSetCandidates* candidates = _collection_set->candidates();

  uint min_old_regions_end = MIN2(candidates->cur_idx() + calc_min_old_cset_length(), candidates->num_regions());
  double predicted_region_evac_time_ms = survivor_base_time_ms;
  for (uint i = candidates->cur_idx(); i < min_old_regions_end; i++) {
    HeapRegion* r = candidates->at(i);
    predicted_region_evac_time_ms += predict_region_total_time_ms(r, false);
  }
  uint desired_eden_length_by_min_cset_length =
    calculate_desired_eden_length_by_pause(predicted_region_evac_time_ms,
                                           min_eden_length,
                                           max_eden_length);

  return desired_eden_length_by_min_cset_length;
}

double G1Policy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
  for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
       it != survivor_regions->end();
       ++it) {
    survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
  }
  return survivor_regions_evac_time;
}

void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
  guarantee(use_adaptive_young_list_length(), "should not call this otherwise");

  if (rs_length > _rs_length_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_length_prediction = rs_length * 1100 / 1000;
    update_rs_length_prediction(rs_length_prediction);
    update_young_length_bounds(rs_length_prediction);
  }
}

void G1Policy::update_rs_length_prediction() {
  update_rs_length_prediction(_analytics->predict_rs_length());
}

void G1Policy::update_rs_length_prediction(size_t prediction) {
  if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
    _rs_length_prediction = prediction;
  }
}

void G1Policy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_in_young_only_phase(false);
  collector_state()->set_in_full_gc(true);
  _collection_set->clear_candidates();
  _pending_cards_at_gc_start = 0;
}

void G1Policy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_in_full_gc(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_in_young_only_phase(true);
  collector_state()->set_in_young_gc_before_mixed(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_in_initial_mark_gc(false);
  collector_state()->set_mark_or_rebuild_in_progress(false);
  collector_state()->set_clearing_next_bitmap(false);

  _eden_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  _free_regions_at_end_of_collection = _g1h->num_free_regions();
  _survivor_surv_rate_group->reset();
  update_young_length_bounds();
  update_rs_length_prediction();

  _old_gen_alloc_tracker.reset_after_full_gc();

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
  log_debug(gc, refine, stats)
           ("%s refinement: %.2fms, refined: " SIZE_FORMAT
            ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
            kind,
            stats.refinement_time().seconds() * MILLIUNITS,
            stats.refined_cards(),
            stats.precleaned_cards(),
            stats.dirtied_cards());
}

void G1Policy::record_concurrent_refinement_stats() {
  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  _pending_cards_at_gc_start = dcqs.num_cards();

  // Collect per-thread stats, mostly from mutator activity.
  G1ConcurrentRefineStats mut_stats = dcqs.get_and_reset_refinement_stats();

  // Collect specialized concurrent refinement thread stats.
  G1ConcurrentRefine* cr = _g1h->concurrent_refine();
  G1ConcurrentRefineStats cr_stats = cr->get_and_reset_refinement_stats();

  G1ConcurrentRefineStats total_stats = mut_stats + cr_stats;

  log_refinement_stats("Mutator", mut_stats);
  log_refinement_stats("Concurrent", cr_stats);
  log_refinement_stats("Total", total_stats);

  // Record the rate at which cards were refined.
  // Don't update the rate if the current sample is empty or time is zero.
  Tickspan refinement_time = total_stats.refinement_time();
  size_t refined_cards = total_stats.refined_cards();
  if ((refined_cards > 0) && (refinement_time > Tickspan())) {
    double rate = refined_cards / (refinement_time.seconds() * MILLIUNITS);
    _analytics->report_concurrent_refine_rate_ms(rate);
    log_debug(gc, refine, stats)("Concurrent refinement rate: %.2f cards/ms", rate);
  }

  // Record mutator's card logging rate.
  double mut_start_time = _analytics->prev_collection_pause_end_ms();
  double mut_end_time = phase_times()->cur_collection_start_sec() * MILLIUNITS;
  double mut_time = mut_end_time - mut_start_time;
  // Unlike above for conc-refine rate, here we should not require a
  // non-empty sample, since an application could go some time with only
  // young-gen or filtered out writes. But we'll ignore unusually short
  // sample periods, as they may just pollute the predictions.
  if (mut_time > 1.0) {   // Require > 1ms sample time.
    double dirtied_rate = total_stats.dirtied_cards() / mut_time;
    _analytics->report_dirtied_cards_rate_ms(dirtied_rate);
    log_debug(gc, refine, stats)("Generate dirty cards rate: %.2f cards/ms", dirtied_rate);
  }
}

void G1Policy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So, no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
         "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
         max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
  assert_used_and_recalculate_used_equal(_g1h);

  phase_times()->record_cur_collection_start_sec(start_time_sec);

  record_concurrent_refinement_stats();

  _collection_set->reset_bytes_used_before();

  // do that for any other surv rate groups
  _eden_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
}

void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_initial_mark_gc(false);
}

void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}

void G1Policy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1Policy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1Policy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
}

double G1Policy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
}

double G1Policy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms - phase_times()->cur_collection_par_time_ms();
}

double G1Policy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms() - phase_times()->total_rebuild_freelist_time_ms();
}

bool G1Policy::about_to_start_mixed_phase() const {
  return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
}

bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
  }

  return result;
}

double G1Policy::logged_cards_processing_time() const {
  double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
  size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
  size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
  // This may happen if there are duplicate cards in different log buffers.
  if (logged_dirty_cards > scan_heap_roots_cards) {
    return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
  }
  return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1Policy::record_collection_pause_end(double pause_time_ms) {
  G1GCPhaseTimes* p = phase_times();

  double end_time_sec = os::elapsedTime();

  bool this_pause_included_initial_mark = false;
  bool this_pause_was_young_only = collector_state()->in_young_only_phase();

  bool update_stats = !_g1h->evacuation_failed();

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
  if (this_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
  }

  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    app_time_ms = 1.0;
  }

  if (update_stats) {
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
    uint regions_allocated = _collection_set->eden_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _analytics->report_alloc_rate_ms(alloc_rate_ms);

    _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
  }

  if (collector_state()->in_young_gc_before_mixed()) {
    assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
    // This has been the young GC before we start doing mixed GCs. We already
    // decided to start mixed GCs much earlier, so there is nothing to do except
    // advancing the state.
    collector_state()->set_in_young_only_phase(false);
    collector_state()->set_in_young_gc_before_mixed(false);
  } else if (!this_pause_was_young_only) {
    // This is a mixed GC. Here we decide whether to continue doing more
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_in_young_only_phase(true);

      clear_collection_set_candidates();
      maybe_start_marking();
    }
  }

  _eden_surv_rate_group->start_adding_regions();

  double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
  if (update_stats) {
    size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
                                          p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
    // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
    size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
                                      p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
                                      total_log_buffer_cards;

    // The threshold for the number of cards in a given sampling which we consider
    // large enough so that the impact from setup and other costs is negligible.
    size_t const CardsNumSamplingThreshold = 10;

    if (total_cards_merged > CardsNumSamplingThreshold) {
      double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
                                    average_time_ms(G1GCPhaseTimes::MergeRS) +
                                    average_time_ms(G1GCPhaseTimes::MergeHCC) +
                                    average_time_ms(G1GCPhaseTimes::MergeLB) +
                                    average_time_ms(G1GCPhaseTimes::OptMergeRS);
      _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
    }

    // Update prediction for card scan
    size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                       p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);

    if (total_cards_scanned > CardsNumSamplingThreshold) {
      double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
                                        average_time_ms(G1GCPhaseTimes::OptScanHR);

      _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only);
    }

    // Update prediction for the ratio between cards from the remembered
    // sets and actually scanned cards from the remembered sets.
    // Cards from the remembered sets are all cards not duplicated by cards from
    // the logs.
    // Due to duplicates in the log buffers, the number of actually scanned cards
    // can be smaller than the cards in the log buffers.
    const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0;
    double merge_to_scan_ratio = 0.0;
    if (total_cards_scanned > 0) {
      merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
    }
    _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only);

    const size_t recorded_rs_length = _collection_set->recorded_rs_length();
    const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
    _analytics->report_rs_length_diff(rs_length_diff);

    // Update prediction for copy cost per byte
    size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);

    if (copied_bytes > 0) {
      double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
    }

    if (_collection_set->young_region_length() > 0) {
      _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
                                                        _collection_set->young_region_length());
    }

    if (_collection_set->old_region_length() > 0) {
      _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
                                                            _collection_set->old_region_length());
    }

    _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));

    // Do not update RS lengths and the number of pending cards with information from mixed gc:
    // these are wildly different to those during young only gc and mess up young gen sizing right
    // after the mixed gc phase.
    // During mixed gc we do not use them for young gen sizing.
    if (this_pause_was_young_only) {
      _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
      _analytics->report_rs_length((double) _rs_length);
    }
  }

  assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
         "If the last pause has been an initial mark, we should not have been in the marking window");
  if (this_pause_included_initial_mark) {
    collector_state()->set_mark_or_rebuild_in_progress(true);
  }

  _free_regions_at_end_of_collection = _g1h->num_free_regions();

  update_rs_length_prediction();

  // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
  // that in this case we are not running in a "normal" operating mode.
  if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
    update_young_length_bounds();

    _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
    update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
                           _old_gen_alloc_tracker.last_cycle_old_bytes(),
                           this_pause_was_young_only);

    _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
  } else {
    // Any garbage collection triggered as periodic collection resets the time-to-mixed
    // measurement. Periodic collection typically means that the application is "inactive", i.e.
    // the marking threads may have received an uncharacteristic amount of cpu time
    // for completing the marking, i.e. are faster than expected.
    // This skews the predicted marking length towards smaller values which might cause
    // the mark start being too late.
    _initial_mark_to_mixed.reset();
  }

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
                                "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
                                scan_logged_cards_time_goal_ms, merge_hcc_time_ms);

    scan_logged_cards_time_goal_ms = 0;
  } else {
    scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
  }

  double const logged_cards_time = logged_cards_processing_time();

  log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
                              scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);

  _g1h->concurrent_refine()->adjust(logged_cards_time,
                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
                                    scan_logged_cards_time_goal_ms);
}

G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor) {
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     predictor,
                                     G1ReservePercent,
                                     G1HeapWastePercent);
  } else {
    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
  }
}

void G1Policy::update_ihop_prediction(double mutator_time_s,
                                      size_t mutator_alloc_bytes,
                                      bool this_gc_was_young_only) {
  // Always try to update IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

  // Avoid using really small application times that might create samples with
  // very high or very low values. They may be caused by e.g. back-to-back gcs.
  double const min_valid_time = 1e-6;

  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Initial mark to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
      report = true;
    }
  }

  // As an approximation for the young gc promotion rates during marking we use
  // all of them. In many applications there are only a few if any young gcs during
  // marking, which makes any prediction useless. This increases the accuracy of the
  // prediction.
  if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
    // IHOP control wants to know the expected young gen length if it were not
    // restrained by the heap reserve. Using the actual length would make the
    // prediction too small and then limit the young gen every time we get to the
    // predicted target occupancy.
    uint young_gen_size = young_list_desired_length() * HeapRegion::GrainBytes;
    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}

void G1Policy::report_ihop_statistics() {
  _ihop_control->print();
}

void G1Policy::print_phases() {
  phase_times()->print();
}

double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
                                              size_t rs_length) const {
  size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
  return
    _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
    _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
    _analytics->predict_constant_other_time_ms() +
    predict_survivor_regions_evac_time();
}

double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = _analytics->predict_rs_length();
  return predict_base_elapsed_time_ms(pending_cards, rs_length);
}

size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (!hr->is_young()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));
  }
  return bytes_to_copy;
}

double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
  if (count == 0) {
    return 0.0;
  }
  size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes;
  if (bytes_to_copy != NULL) {
    *bytes_to_copy = expected_bytes;
  }
  return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->mark_or_rebuild_in_progress());
}

double G1Policy::predict_region_copy_time_ms(HeapRegion* hr) const {
  size_t const bytes_to_copy = predict_bytes_to_copy(hr);
  return _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
}

double G1Policy::predict_region_non_copy_time_ms(HeapRegion* hr,
                                                 bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  size_t scan_card_num = _analytics->predict_scan_card_num(rs_length, for_young_gc);

  double region_elapsed_time_ms =
    _analytics->predict_card_merge_time_ms(rs_length, collector_state()->in_young_only_phase()) +
    _analytics->predict_card_scan_time_ms(scan_card_num, collector_state()->in_young_only_phase());

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

double G1Policy::predict_region_total_time_ms(HeapRegion* hr, bool for_young_gc) const {
  return predict_region_non_copy_time_ms(hr, for_young_gc) + predict_region_copy_time_ms(hr);
}

bool G1Policy::should_allocate_mutator_region() const {
  uint young_list_length = _g1h->young_regions_count();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length < young_list_target_length;
}

bool G1Policy::can_expand_young_list() const {
  uint young_list_length = _g1h->young_regions_count();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

bool G1Policy::use_adaptive_young_list_length() const {
  return _young_gen_sizer->use_adaptive_young_list_length();
}

size_t G1Policy::desired_survivor_size(uint max_regions) const {
  size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
  return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
}

void G1Policy::print_age_table() {
  _survivors_age_table.print_age_table(_tenuring_threshold);
}

uint G1Policy::calculate_young_max_length(uint target_young_length) const {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  uint max_length = target_young_length + expansion_region_num;
  assert(target_young_length <= max_length, "post-condition");
  return max_length;
}

// Calculates survivor space parameters.
void G1Policy::update_survivors_policy() {
  double max_survivor_regions_d =
    (double) _young_list_target_length / (double) SurvivorRatio;

  // Calculate desired survivor size based on desired max survivor regions (unconstrained
  // by remaining heap). Otherwise we may cause undesired promotions as we are
  // already getting close to end of the heap, impacting performance even more.
  uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
  size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
  if (UsePerfData) {
    _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
  }
  // The real maximum survivor size is bounded by the number of regions that can
  // be allocated into.
  _max_survivor_regions = MIN2(desired_max_survivor_regions,
                               _g1h->num_free_or_available_regions());
}

bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    return false;
  }
}

void G1Policy::initiate_conc_mark() {
  collector_state()->set_in_initial_mark_gc(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1Policy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->in_initial_mark_gc() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->in_initial_mark_gc(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. Or we've been explicitly requested
    // to start a concurrent marking cycle. Either way, we initiate
    // one if not inhibited for some reason.

    GCCause::Cause cause = _g1h->gc_cause();
    if ((cause != GCCause::_wb_breakpoint) &&
        ConcurrentGCBreakpoints::is_controlled()) {
      log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
    } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
               (cause == GCCause::_wb_breakpoint)) {
      // Initiate a user requested initial mark or run_to a breakpoint.
      // An initial mark must be young only GC, so the collector state
      // must be updated to reflect this.
      collector_state()->set_in_young_only_phase(true);
      collector_state()->set_in_young_gc_before_mixed(false);

      // We might have ended up coming here about to start a mixed phase with a collection set
      // active. The following remark might change the "evacuation efficiency" of
      // the regions in this set, leading to failing asserts later.
      // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
      clear_collection_set_candidates();
      abort_time_to_mixed_tracking();
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
                          (cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}

void G1Policy::record_concurrent_mark_cleanup_end() {
  G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
  _collection_set->set_candidates(candidates);

  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
  if (!mixed_gc_pending) {
    clear_collection_set_candidates();
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
  collector_state()->set_mark_or_rebuild_in_progress(false);

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
  return percent_of(reclaimable_bytes, _g1h->capacity());
}

class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    r->rem_set()->clear_locked(true /* only_cardset */);
    return false;
  }
};

void G1Policy::clear_collection_set_candidates() {
  // Clear remembered sets of remaining candidate regions and the actual candidate
  // set.
  G1ClearCollectionSetCandidateRemSets cl;
  _collection_set->candidates()->iterate(&cl);
  _collection_set->clear_candidates();
}

void G1Policy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
  assert(!collector_state()->in_full_gc(), "must be");
  if (collector_state()->in_initial_mark_gc()) {
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return InitialMarkGC;
  } else if (collector_state()->in_young_gc_before_mixed()) {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    return LastYoungGC;
  } else if (collector_state()->in_mixed_phase()) {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return MixedGC;
  } else {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return YoungOnlyGC;
  }
}

void G1Policy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed gc.
  switch (kind) {
    case FullGC:
      abort_time_to_mixed_tracking();
      break;
    case Cleanup:
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      _initial_mark_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
      if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
        _initial_mark_to_mixed.record_initial_mark_end(end);
      }
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}

void G1Policy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}

bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
                                       const char* false_action_str) const {
  G1CollectionSetCandidates* candidates = _collection_set->candidates();

  if (candidates == NULL || candidates->is_empty()) {
    if (false_action_str != NULL) {
      log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    }
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
  double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_percent <= threshold) {
    if (false_action_str != NULL) {
      log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                          false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
    }
    return false;
  }
  if (true_action_str != NULL) {
    log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
  }
  return true;
}

uint G1Policy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet candidates in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = _collection_set->candidates()->num_regions();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1Policy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}

void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
                                                    double time_remaining_ms,
                                                    uint& num_initial_regions,
                                                    uint& num_optional_regions) {
  assert(candidates != NULL, "Must be");

  num_initial_regions = 0;
  num_optional_regions = 0;
  uint num_expensive_regions = 0;

  double predicted_initial_time_ms = 0.0;
  double predicted_optional_time_ms = 0.0;

  double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();

  const uint min_old_cset_length = calc_min_old_cset_length();
  const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
  const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
  bool check_time_remaining = use_adaptive_young_list_length();

  uint candidate_idx = candidates->cur_idx();

  log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
                            "time remaining %1.2fms, optional threshold %1.2fms",
                            min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);

  HeapRegion* hr = candidates->at(candidate_idx);
  while (hr != NULL) {
    if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
      // Added maximum number of old regions to the CSet.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
                                "Initial %u regions, optional %u regions",
                                num_initial_regions, num_optional_regions);
      break;
    }

    // Stop adding regions if the remaining reclaimable space is
    // not above G1HeapWastePercent.
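    // (Illustrative: with the default G1HeapWastePercent of 5, old regions stop
    // being added once the candidates' remaining reclaimable space drops to 5% or
    // less of the total heap capacity.)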
    size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
    double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
    double threshold = (double) G1HeapWastePercent;
    if (reclaimable_percent <= threshold) {
      // We've added enough old regions that the amount of uncollected
      // reclaimable space is at or below the waste threshold. Stop
      // adding old regions to the CSet.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
                                "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
                                reclaimable_percent, G1HeapWastePercent);
      break;
    }

    double predicted_time_ms = predict_region_total_time_ms(hr, false);
    time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
    // Add regions to old set until we reach the minimum amount
    if (num_initial_regions < min_old_cset_length) {
      predicted_initial_time_ms += predicted_time_ms;
      num_initial_regions++;
      // Record the number of regions added with no time remaining
      if (time_remaining_ms == 0.0) {
        num_expensive_regions++;
      }
    } else if (!check_time_remaining) {
      // In the non-auto-tuning case, we'll finish adding regions
      // to the CSet if we reach the minimum.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
      break;
    } else {
      // Keep adding regions to old set until we reach the optional threshold
      if (time_remaining_ms > optional_threshold_ms) {
        predicted_initial_time_ms += predicted_time_ms;
        num_initial_regions++;
      } else if (time_remaining_ms > 0) {
        // Keep adding optional regions until time is up.
        assert(num_optional_regions < max_optional_regions, "Should not be possible.");
        predicted_optional_time_ms += predicted_time_ms;
        num_optional_regions++;
      } else {
        log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
        break;
      }
    }
    hr = candidates->at(++candidate_idx);
  }
  if (hr == NULL) {
    log_debug(gc, ergo, cset)("Old candidate collection set empty.");
  }

  if (num_expensive_regions > 0) {
    log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
                              num_expensive_regions);
  }

  log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
                            "predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2fms",
                            num_initial_regions, num_optional_regions,
                            predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
}

void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
                                                         uint const max_optional_regions,
                                                         double time_remaining_ms,
                                                         uint& num_optional_regions) {
  assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");

  num_optional_regions = 0;
  double prediction_ms = 0;
  uint candidate_idx = candidates->cur_idx();

  HeapRegion* r = candidates->at(candidate_idx);
  while (num_optional_regions < max_optional_regions) {
    assert(r != NULL, "Region must exist");
    prediction_ms += predict_region_total_time_ms(r, false);

    if (prediction_ms > time_remaining_ms) {
      log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
                                prediction_ms, r->hrm_index(), time_remaining_ms);
      break;
    }
    // This region will be included in the next optional evacuation.
    // Note: prediction_ms is the cumulative prediction for all regions taken so
    // far, so it is compared against the unchanged time_remaining_ms budget above.

    num_optional_regions++;
    r = candidates->at(++candidate_idx);
  }

  log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
                            num_optional_regions, max_optional_regions, prediction_ms);
}

void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
  note_start_adding_survivor_regions();

  HeapRegion* last = NULL;
  for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
       it != survivors->regions()->end();
       ++it) {
    HeapRegion* curr = *it;
    set_region_survivor(curr);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _collection_set->add_survivor_regions(curr);

    last = curr;
  }
  note_stop_adding_survivor_regions();

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.
}
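
Editorial sketch, not part of g1Policy.cpp: the two old-CSet bounds computed by calc_min_old_cset_length() and calc_max_old_cset_length() reduce to ceiling divisions. The standalone snippet below replays that arithmetic with assumed example values (96 marked candidate regions, 2048 heap regions, and the defaults G1MixedGCCountTarget = 8 and G1OldCSetRegionThresholdPercent = 10); the helper ceil_div is invented purely for illustration.

// Standalone illustration of the "emulate ceiling" math used by the policy;
// the input values below are assumptions, not taken from a real run.
#include <cstddef>
#include <cstdio>

static size_t ceil_div(size_t num, size_t den) {
  size_t result = num / den;
  if (result * den < num) {  // emulate ceiling, as in the policy code
    result += 1;
  }
  return result;
}

int main() {
  const size_t candidate_regions = 96;    // marked old regions added as CSet candidates
  const size_t mixed_gc_target   = 8;     // default G1MixedGCCountTarget
  const size_t heap_regions      = 2048;  // e.g. a 4G heap with 2M regions
  const size_t threshold_percent = 10;    // default G1OldCSetRegionThresholdPercent

  // Minimum old regions per mixed GC: spread all candidates over at most 8 mixed GCs.
  size_t min_old_cset = ceil_div(candidate_regions, mixed_gc_target);     // 12
  // Maximum old regions per mixed GC: at most 10% of all heap regions.
  size_t max_old_cset = ceil_div(heap_regions * threshold_percent, 100);  // 205

  printf("min old cset length: %zu, max old cset length: %zu\n", min_old_cset, max_old_cset);
  return 0;
}

With these assumed values a mixed GC takes at least 12 and at most 205 old regions; calculate_old_collection_set_regions() then splits that range into initial and optional regions based on the remaining pause-time budget.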