/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "logging/log.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/pair.hpp"

G1Policy::G1Policy(STWGCTimer* gc_timer) :
  _predictor(G1ConfidencePercent / 100.0),
  _analytics(new G1Analytics(&_predictor)),
  _remset_tracker(),
  _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  _ihop_control(create_ihop_control(&_predictor)),
  _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  _full_collection_start_sec(0.0),
  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
  _young_list_target_length(0),
  _young_list_max_length(0),
  _eden_surv_rate_group(new G1SurvRateGroup()),
  _survivor_surv_rate_group(new G1SurvRateGroup()),
  _reserve_factor((double) G1ReservePercent / 100.0),
  _reserve_regions(0),
  _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  _free_regions_at_end_of_collection(0),
  _rs_length(0),
  _rs_length_prediction(0),
  _pending_cards_at_gc_start(0),
  _old_gen_alloc_tracker(),
  _initial_mark_to_mixed(),
  _collection_set(NULL),
  _g1h(NULL),
  _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  _mark_remark_start_sec(0),
  _mark_cleanup_start_sec(0),
  _tenuring_threshold(MaxTenuringThreshold),
  _max_survivor_regions(0),
  _survivors_age_table(true)
{
}

G1Policy::~G1Policy() {
  delete _ihop_control;
  delete _young_gen_sizer;
}

G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  if (G1Arguments::is_heterogeneous_heap()) {
    return new G1HeterogeneousHeapPolicy(gc_timer_stw);
  } else {
    return new G1Policy(gc_timer_stw);
  }
}

G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }

void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  _g1h = g1h;
  _collection_set = collection_set;

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());

  _free_regions_at_end_of_collection = _g1h->num_free_regions();

  update_young_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  _collection_set->start_incremental_building();
}

void G1Policy::note_gc_start() {
  phase_times()->note_gc_start();
}

class G1YoungLengthPredictor {
  const double _base_time_ms;
  const double _base_free_regions;
  const double _target_pause_time_ms;
  const G1Policy* const _policy;

 public:
  G1YoungLengthPredictor(double base_time_ms,
                         double base_free_regions,
                         double target_pause_time_ms,
                         const G1Policy* policy) :
    _base_time_ms(base_time_ms),
    _base_free_regions(base_free_regions),
    _target_pause_time_ms(target_pause_time_ms),
    _policy(policy) {}

  bool will_fit(uint young_length) const {
    if (young_length >= _base_free_regions) {
      // end condition 1: not enough space for the young regions
      return false;
    }

    size_t bytes_to_copy = 0;
    const double copy_time_ms = _policy->predict_eden_copy_time_ms(young_length, &bytes_to_copy);
    const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
    const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
    if (pause_time_ms > _target_pause_time_ms) {
      // end condition 2: prediction is over the target pause time
      return false;
    }

    const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;

    // When copying, we will likely need more bytes free than is live in the region.
    // Add some safety margin to factor in the confidence of our guess, and the
    // natural expected waste.
    // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
    // of the calculation: the lower the confidence, the more headroom.
    // (100 + TargetPLABWastePct) represents the increase in expected bytes during
    // copying due to anticipated waste in the PLABs.
    const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
    const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);

    if (expected_bytes_to_copy > free_bytes) {
      // end condition 3: out-of-space
      return false;
    }

    // success!
    return true;
  }
};
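
// A worked example of the safety margin above (a sketch, assuming the default
// flag values G1ConfidencePercent=50 and TargetPLABWastePct=10): the scale
// factor is (100.0 / 50) * (100 + 10) / 100.0 = 2.2, i.e. a prediction of
// 10 MB of live data to copy only "fits" if at least 22 MB of free space
// remains outside the prospective young generation.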

void G1Policy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);

  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}

uint G1Policy::calculate_desired_eden_length_by_mmu() const {
  // Determine the desired eden length that keeps GCs spaced far enough apart
  // to meet the MMU goal: predict how many regions the application will
  // allocate until the earliest time the MMU tracker allows the next GC.
  // Other constraints enforce a minimum eden of one region anyway.
  uint desired_min_length = 0;
  if (use_adaptive_young_list_length()) {
    double now_sec = os::elapsedTime();
    double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
    double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
    desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
  }
  return desired_min_length;
}

uint G1Policy::update_young_max_and_target_length() {
  return update_young_max_and_target_length(_analytics->predict_rs_length());
}

uint G1Policy::update_young_max_and_target_length(size_t rs_length) {
  uint unbounded_target_length = update_young_target_length(rs_length);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1Policy::update_young_target_length(size_t rs_length) {
  uint desired_length = calculate_young_desired_length(rs_length);
  _young_list_target_length = calculate_young_target_length(desired_length);

  log_debug(gc, ergo, heap)("Young target lengths: desired: %u target: %u",
                            desired_length, _young_list_target_length);
  return desired_length;
}
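
// Worked example for calculate_desired_eden_length_by_mmu() (hypothetical
// numbers, not taken from the source): if the MMU tracker says the next GC
// may start at the earliest 500 ms from now, and the predicted allocation
// rate is 0.1 regions/ms, the MMU-derived desired eden length is
// ceil(0.1 * 500) = 50 regions.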

// Calculates the desired young gen length. It is calculated from:
//
// - the sizer's min/max bounds on the young gen
// - the pause time goal for the whole young gen evacuation
// - the MMU goal influencing eden to keep GCs spaced apart
// - a minimum of one eden region
//
uint G1Policy::calculate_young_desired_length(size_t rs_length) const {
  uint min_young_length_by_sizer = _young_gen_sizer->min_desired_young_length();
  uint max_young_length_by_sizer = _young_gen_sizer->max_desired_young_length();

  assert(min_young_length_by_sizer >= 1, "invariant");
  assert(max_young_length_by_sizer >= min_young_length_by_sizer, "invariant");

  // Absolute minimum eden length. See above why.
  // Enforcing a minimum eden length helps at startup when the predictors are not
  // yet trained on the application, avoiding unnecessary (but very short) full gcs
  // on very small (initial) heaps.
  uint const MinDesiredEdenLength = 1;

  // Calculate the absolute and desired min bounds first.

  // This is how many survivor regions we already have.
  const uint survivor_length = _g1h->survivor_regions_count();
  // Size of the already allocated young gen.
  const uint allocated_young_length = _g1h->young_regions_count();
  // This is the absolute minimum young length that we can return. Ensure that we
  // don't go below any user-defined minimum bound; but we might have already
  // allocated more than that for various reasons. In this case, use that.
  uint absolute_min_young_length = MAX2(allocated_young_length, min_young_length_by_sizer);
  // Calculate the absolute max bounds. After evac failure or when revising the
  // young length we might have exceeded absolute min length or absolute_max_length,
  // so adjust the result accordingly.
  uint absolute_max_young_length = MAX2(max_young_length_by_sizer, absolute_min_young_length);

  uint desired_eden_length_by_mmu = 0;
  uint desired_eden_length_by_pause = 0;
  uint desired_eden_length_before_mixed = 0;

  uint desired_young_length = 0;
  if (use_adaptive_young_list_length()) {
    desired_eden_length_by_mmu = calculate_desired_eden_length_by_mmu();

    const size_t pending_cards = _analytics->predict_pending_cards();
    double survivor_base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);

    if (!next_gc_should_be_mixed(NULL, NULL)) {
      desired_eden_length_by_pause =
        calculate_desired_eden_length_by_pause(survivor_base_time_ms,
                                               absolute_min_young_length - survivor_length,
                                               absolute_max_young_length - survivor_length);
    } else {
      desired_eden_length_before_mixed =
        calculate_desired_eden_length_before_mixed(survivor_base_time_ms,
                                                   absolute_min_young_length - survivor_length,
                                                   absolute_max_young_length - survivor_length);
    }
    // The code above sets either desired_eden_length_by_pause or
    // desired_eden_length_before_mixed; the other remains zero. Below we use
    // whichever has been set.
    uint desired_eden_length = MAX2(desired_eden_length_by_pause,
                                    desired_eden_length_before_mixed);

    // Finally incorporate MMU concerns; assume that it overrides the pause time
    // goal, as the default value has been chosen to effectively disable it.
    // Also request at least one eden region, see above for reasons.
    desired_eden_length = MAX3(desired_eden_length,
                               desired_eden_length_by_mmu,
                               MinDesiredEdenLength);

    desired_young_length = desired_eden_length + survivor_length;
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    desired_young_length = min_young_length_by_sizer;
  }
  // Clamp to absolute min/max after we determined the desired lengths.
  desired_young_length = clamp(desired_young_length, absolute_min_young_length, absolute_max_young_length);

  log_trace(gc, ergo, heap)("Young desired length %u "
                            "survivor length %u "
                            "allocated young length %u "
                            "absolute min young length %u "
                            "absolute max young length %u "
                            "desired eden length by mmu %u "
                            "desired eden length by pause %u "
                            "desired eden length before mixed %u "
                            "desired eden length by default %u",
                            desired_young_length, survivor_length,
                            allocated_young_length, absolute_min_young_length,
                            absolute_max_young_length, desired_eden_length_by_mmu,
                            desired_eden_length_by_pause,
                            desired_eden_length_before_mixed,
                            MinDesiredEdenLength);

  assert(desired_young_length >= allocated_young_length, "must be");
  return desired_young_length;
}
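
// Worked example of the composition above (hypothetical numbers): with
// survivor_length = 5, desired_eden_length_by_pause = 20 and
// desired_eden_length_by_mmu = 3, the desired eden length is
// MAX3(20, 3, 1) = 20 and the desired young length is 20 + 5 = 25, which is
// then clamped to [absolute_min_young_length, absolute_max_young_length].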

// Limit the desired (wished) young length by current free regions. If the request
// can be satisfied without using up reserve regions, do so, otherwise eat into
// the reserve, giving away at most what the heap sizer allows.
uint G1Policy::calculate_young_target_length(uint desired_young_length) const {
  uint allocated_young_length = _g1h->young_regions_count();

  uint receiving_additional_eden;
  if (allocated_young_length >= desired_young_length) {
    // Already used up all we actually want (may happen as G1 revises the
    // young list length concurrently, or because of the gclocker). Do not allow more,
    // potentially resulting in GC.
    receiving_additional_eden = 0;
    log_trace(gc, ergo, heap)("Young target length: Already used up desired young %u allocated %u",
                              desired_young_length,
                              allocated_young_length);
  } else {
    // Now look at how many free regions there are currently, and the heap reserve.
    // We will try our best not to "eat" into the reserve as long as we can. If we
    // do, we eat at most the sizer's minimum regions into the reserve or half the
    // reserve rounded up (if possible; this is an arbitrary value).

    uint max_to_eat_into_reserve = MIN2(_young_gen_sizer->min_desired_young_length(),
                                        (_reserve_regions + 1) / 2);

    log_trace(gc, ergo, heap)("Young target length: Common "
                              "free regions at end of collection %u "
                              "desired young length %u "
                              "reserve region %u "
                              "max to eat into reserve %u",
                              _free_regions_at_end_of_collection,
                              desired_young_length,
                              _reserve_regions,
                              max_to_eat_into_reserve);

    if (_free_regions_at_end_of_collection <= _reserve_regions) {
      // Fully eat (or already eating) into the reserve, hand back at most absolute_min_length regions.
      uint receiving_young = MIN3(_free_regions_at_end_of_collection,
                                  desired_young_length,
                                  max_to_eat_into_reserve);
      // We could already have allocated more regions than what we could get
      // above.
      receiving_additional_eden = allocated_young_length < receiving_young ?
                                  receiving_young - allocated_young_length : 0;

      log_trace(gc, ergo, heap)("Young target length: Fully eat into reserve "
                                "receiving young %u receiving additional eden %u",
                                receiving_young,
                                receiving_additional_eden);
    } else if (_free_regions_at_end_of_collection < (desired_young_length + _reserve_regions)) {
      // Partially eat into the reserve, at most max_to_eat_into_reserve regions.
      uint free_outside_reserve = _free_regions_at_end_of_collection - _reserve_regions;
      assert(free_outside_reserve < desired_young_length,
             "must be %u %u",
             free_outside_reserve, desired_young_length);

      uint receiving_within_reserve = MIN2(desired_young_length - free_outside_reserve,
                                           max_to_eat_into_reserve);
      uint receiving_young = free_outside_reserve + receiving_within_reserve;
      // Again, we could have already allocated more than we could get.
      receiving_additional_eden = allocated_young_length < receiving_young ?
                                  receiving_young - allocated_young_length : 0;

      log_trace(gc, ergo, heap)("Young target length: Partially eat into reserve "
                                "free outside reserve %u "
                                "receiving within reserve %u "
                                "receiving young %u "
                                "receiving additional eden %u",
                                free_outside_reserve, receiving_within_reserve,
                                receiving_young, receiving_additional_eden);
    } else {
      // No need to use the reserve.
      receiving_additional_eden = desired_young_length - allocated_young_length;
      log_trace(gc, ergo, heap)("Young target length: No need to use reserve "
                                "receiving additional eden %u",
                                receiving_additional_eden);
    }
  }

  uint target_young_length = allocated_young_length + receiving_additional_eden;

  assert(target_young_length >= allocated_young_length, "must be");

  log_trace(gc, ergo, heap)("Young target length: "
                            "young target length %u "
                            "allocated young length %u "
                            "received additional eden %u",
                            target_young_length, allocated_young_length,
                            receiving_additional_eden);
  return target_young_length;
}
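
// Worked example of the reserve bounds (assuming the default
// G1ReservePercent=10, with a hypothetical sizer minimum of 60 young
// regions): on a 1000-region heap, _reserve_regions = ceil(1000 * 0.10) = 100
// and max_to_eat_into_reserve = MIN2(60, (100 + 1) / 2) = 50 regions.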

uint G1Policy::calculate_desired_eden_length_by_pause(double base_time_ms,
                                                      uint min_eden_length,
                                                      uint max_eden_length) const {
  assert(use_adaptive_young_list_length(), "pre-condition");
  assert(min_eden_length <= max_eden_length, "must be %u %u", min_eden_length, max_eden_length);

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  G1YoungLengthPredictor p(base_time_ms,
                           _free_regions_at_end_of_collection,
                           _mmu_tracker->max_gc_time() * 1000.0,
                           this);
  if (p.will_fit(min_eden_length)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (p.will_fit(max_eden_length)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_eden_length = max_eden_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_eden_length < max_eden_length, "invariant");
      uint diff = (max_eden_length - min_eden_length) / 2;
      while (diff > 0) {
        uint eden_length = min_eden_length + diff;
        if (p.will_fit(eden_length)) {
          min_eden_length = eden_length;
        } else {
          max_eden_length = eden_length;
        }
        assert(min_eden_length < max_eden_length, "invariant");
        diff = (max_eden_length - min_eden_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_eden_length < max_eden_length,
             "otherwise we should have discovered that max_eden_length "
             "fits into the pause target and not done the binary search");
      assert(p.will_fit(min_eden_length),
             "min_eden_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!p.will_fit(min_eden_length + 1),
             "min_eden_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return min_eden_length;
}

uint G1Policy::calculate_desired_eden_length_before_mixed(double survivor_base_time_ms,
                                                          uint min_eden_length,
                                                          uint max_eden_length) const {
  G1CollectionSetCandidates* candidates = _collection_set->candidates();

  uint min_old_regions_end = MIN2(candidates->cur_idx() + calc_min_old_cset_length(), candidates->num_regions());
  double predicted_region_evac_time_ms = survivor_base_time_ms;
  for (uint i = candidates->cur_idx(); i < min_old_regions_end; i++) {
    HeapRegion* r = candidates->at(i);
    predicted_region_evac_time_ms += predict_region_total_time_ms(r, false);
  }
  uint desired_eden_length_by_min_cset_length =
    calculate_desired_eden_length_by_pause(predicted_region_evac_time_ms,
                                           min_eden_length,
                                           max_eden_length);

  return desired_eden_length_by_min_cset_length;
}

double G1Policy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
  for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
       it != survivor_regions->end();
       ++it) {
    survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
  }
  return survivor_regions_evac_time;
}

void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
  guarantee(use_adaptive_young_list_length(), "should not call this otherwise");

  if (rs_length > _rs_length_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_length_prediction = rs_length * 1100 / 1000;
    update_rs_length_prediction(rs_length_prediction);

    update_young_max_and_target_length(rs_length_prediction);
  }
}

void G1Policy::update_rs_length_prediction() {
  update_rs_length_prediction(_analytics->predict_rs_length());
}

void G1Policy::update_rs_length_prediction(size_t prediction) {
  if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
    _rs_length_prediction = prediction;
  }
}
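
// Worked example of the 10% slack above (hypothetical numbers): if the
// sampled remembered set length is 200000 entries and exceeds the current
// prediction, the new prediction becomes 200000 * 1100 / 1000 = 220000, so
// small further growth does not immediately force another recalculation.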

void G1Policy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_in_young_only_phase(false);
  collector_state()->set_in_full_gc(true);
  _collection_set->clear_candidates();
  _pending_cards_at_gc_start = 0;
}

void G1Policy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_in_full_gc(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_in_young_only_phase(true);
  collector_state()->set_in_young_gc_before_mixed(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_in_initial_mark_gc(false);
  collector_state()->set_mark_or_rebuild_in_progress(false);
  collector_state()->set_clearing_next_bitmap(false);

  _eden_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  _free_regions_at_end_of_collection = _g1h->num_free_regions();
  _survivor_surv_rate_group->reset();
  update_young_max_and_target_length();
  update_rs_length_prediction();

  _old_gen_alloc_tracker.reset_after_full_gc();

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
  log_debug(gc, refine, stats)
           ("%s refinement: %.2fms, refined: " SIZE_FORMAT
            ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
            kind,
            stats.refinement_time().seconds() * MILLIUNITS,
            stats.refined_cards(),
            stats.precleaned_cards(),
            stats.dirtied_cards());
}

void G1Policy::record_concurrent_refinement_stats() {
  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  _pending_cards_at_gc_start = dcqs.num_cards();

  // Collect per-thread stats, mostly from mutator activity.
  G1ConcurrentRefineStats mut_stats = dcqs.get_and_reset_refinement_stats();

  // Collect specialized concurrent refinement thread stats.
  G1ConcurrentRefine* cr = _g1h->concurrent_refine();
  G1ConcurrentRefineStats cr_stats = cr->get_and_reset_refinement_stats();

  G1ConcurrentRefineStats total_stats = mut_stats + cr_stats;

  log_refinement_stats("Mutator", mut_stats);
  log_refinement_stats("Concurrent", cr_stats);
  log_refinement_stats("Total", total_stats);

  // Record the rate at which cards were refined.
  // Don't update the rate if the current sample is empty or time is zero.
  Tickspan refinement_time = total_stats.refinement_time();
  size_t refined_cards = total_stats.refined_cards();
  if ((refined_cards > 0) && (refinement_time > Tickspan())) {
    double rate = refined_cards / (refinement_time.seconds() * MILLIUNITS);
    _analytics->report_concurrent_refine_rate_ms(rate);
    log_debug(gc, refine, stats)("Concurrent refinement rate: %.2f cards/ms", rate);
  }

  // Record mutator's card logging rate.
  double mut_start_time = _analytics->prev_collection_pause_end_ms();
  double mut_end_time = phase_times()->cur_collection_start_sec() * MILLIUNITS;
  double mut_time = mut_end_time - mut_start_time;
  // Unlike above for the conc-refine rate, here we should not require a
  // non-empty sample, since an application could go some time with only
  // young-gen or filtered-out writes. But we'll ignore unusually short
  // sample periods, as they may just pollute the predictions.
  if (mut_time > 1.0) {   // Require > 1ms sample time.
    double dirtied_rate = total_stats.dirtied_cards() / mut_time;
    _analytics->report_dirtied_cards_rate_ms(dirtied_rate);
    log_debug(gc, refine, stats)("Generate dirty cards rate: %.2f cards/ms", dirtied_rate);
  }
}
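
// Worked example of the rates above (hypothetical numbers): if mutator and
// refinement threads together refined 50000 cards in 25 ms of refinement
// time, the reported concurrent refinement rate is 50000 / 25 = 2000
// cards/ms; the dirtied-cards rate is computed the same way over the mutator
// time since the last pause.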

void G1Policy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
         "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
         max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
  assert_used_and_recalculate_used_equal(_g1h);

  phase_times()->record_cur_collection_start_sec(start_time_sec);

  record_concurrent_refinement_stats();

  _collection_set->reset_bytes_used_before();

  // do that for any other surv rate groups
  _eden_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
}

void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_initial_mark_gc(false);
}

void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}

void G1Policy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1Policy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1Policy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
}

double G1Policy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
}

double G1Policy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms - phase_times()->cur_collection_par_time_ms();
}

double G1Policy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) -
         phase_times()->total_free_cset_time_ms() -
         phase_times()->total_rebuild_freelist_time_ms();
}

bool G1Policy::about_to_start_mixed_phase() const {
  return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
}
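
// Worked example for the check in need_to_start_conc_mark() below (a sketch,
// assuming the static IHOP control, i.e. -XX:-G1UseAdaptiveIHOP, and the
// default InitiatingHeapOccupancyPercent=45): on an 8 GB heap the threshold
// is about 3.6 GB; a concurrent cycle is requested once the non-young
// occupancy plus the current allocation request exceeds that threshold.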

bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
  }

  return result;
}

double G1Policy::logged_cards_processing_time() const {
  double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
  size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
  size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
  // This may happen if there are duplicate cards in different log buffers.
  if (logged_dirty_cards > scan_heap_roots_cards) {
    return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
  }
  return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
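
// Worked example for logged_cards_processing_time() (hypothetical numbers):
// if heap-root scanning took 10 ms for 100000 scanned cards, of which 30000
// came from log buffers, the logged-card share is 10 * 30000 / 100000 = 3 ms,
// plus the time spent in the MergeLB phase itself.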

void G1Policy::record_collection_pause_end(double pause_time_ms) {
  G1GCPhaseTimes* p = phase_times();

  double end_time_sec = os::elapsedTime();

  bool this_pause_included_initial_mark = false;
  bool this_pause_was_young_only = collector_state()->in_young_only_phase();

  bool update_stats = !_g1h->evacuation_failed();

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
  if (this_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
  }

  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    app_time_ms = 1.0;
  }

  if (update_stats) {
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place, we can safely ignore them here.
    uint regions_allocated = _collection_set->eden_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _analytics->report_alloc_rate_ms(alloc_rate_ms);

    _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
  }

  if (collector_state()->in_young_gc_before_mixed()) {
    assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
    // This has been the young GC before we start doing mixed GCs. We already
    // decided to start mixed GCs much earlier, so there is nothing to do except
    // advancing the state.
    collector_state()->set_in_young_only_phase(false);
    collector_state()->set_in_young_gc_before_mixed(false);
  } else if (!this_pause_was_young_only) {
    // This is a mixed GC. Here we decide whether to continue doing more
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_in_young_only_phase(true);

      clear_collection_set_candidates();
      maybe_start_marking();
    }
  }

  _eden_surv_rate_group->start_adding_regions();

  double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
  if (update_stats) {
    size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
                                          p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
    // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
    size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
                                      p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
                                      total_log_buffer_cards;

    // The threshold for the number of cards in a given sampling which we consider
    // large enough so that the impact from setup and other costs is negligible.
    size_t const CardsNumSamplingThreshold = 10;

    if (total_cards_merged > CardsNumSamplingThreshold) {
      double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
                                    average_time_ms(G1GCPhaseTimes::MergeRS) +
                                    average_time_ms(G1GCPhaseTimes::MergeHCC) +
                                    average_time_ms(G1GCPhaseTimes::MergeLB) +
                                    average_time_ms(G1GCPhaseTimes::OptMergeRS);
      _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
    }

    // Update prediction for card scan
    size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                       p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);

    if (total_cards_scanned > CardsNumSamplingThreshold) {
      double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
                                        average_time_ms(G1GCPhaseTimes::OptScanHR);

      _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only);
    }
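
    // Worked example for the per-card costs above (hypothetical numbers): if
    // the merge phases together averaged 5 ms across threads and 50000 cards
    // were merged, the reported cost per card merge is 5 / 50000 = 0.0001
    // ms/card; the cost per card scan is derived the same way.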

    // Update prediction for the ratio between cards from the remembered
    // sets and actually scanned cards from the remembered sets.
    // Cards from the remembered sets are all cards not duplicated by cards from
    // the logs.
    // Due to duplicates in the log buffers, the number of actually scanned cards
    // can be smaller than the cards in the log buffers.
    const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0;
    double merge_to_scan_ratio = 0.0;
    if (total_cards_scanned > 0) {
      merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
    }
    _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only);
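
    // Worked example (hypothetical numbers): with total_cards_scanned = 80000
    // and total_log_buffer_cards = 30000, from_rs_length_cards = 50000 and
    // merge_to_scan_ratio = 50000 / 80000 = 0.625.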

    const size_t recorded_rs_length = _collection_set->recorded_rs_length();
    const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
    _analytics->report_rs_length_diff(rs_length_diff);

    // Update prediction for copy cost per byte
    size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);

    if (copied_bytes > 0) {
      double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
    }

    if (_collection_set->young_region_length() > 0) {
      _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
                                                        _collection_set->young_region_length());
    }

    if (_collection_set->old_region_length() > 0) {
      _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
                                                            _collection_set->old_region_length());
    }

    _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));

    // Do not update RS lengths and the number of pending cards with information from mixed gcs:
    // these are wildly different from those during young-only gcs and would mess up the young
    // gen sizing right after the mixed gc phase.
    // During mixed gcs we do not use them for young gen sizing anyway.
    if (this_pause_was_young_only) {
      _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
      _analytics->report_rs_length((double) _rs_length);
    }
  }

  assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
         "If the last pause has been an initial mark, we should not have been in the marking window");
  if (this_pause_included_initial_mark) {
    collector_state()->set_mark_or_rebuild_in_progress(true);
  }

  _free_regions_at_end_of_collection = _g1h->num_free_regions();

  update_rs_length_prediction();

  // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
  // that in this case we are not running in a "normal" operating mode.
  if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
    // IHOP control wants to know the expected young gen length if it were not
    // restrained by the heap reserve. Using the actual length would make the
    // prediction too small and limit the young gen every time we get to the
    // predicted target occupancy.
    size_t last_unrestrained_young_length = update_young_max_and_target_length();

    _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
    update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
                           _old_gen_alloc_tracker.last_cycle_old_bytes(),
                           last_unrestrained_young_length * HeapRegion::GrainBytes,
                           this_pause_was_young_only);

    _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
  } else {
    // Any garbage collection triggered as a periodic collection resets the time-to-mixed
    // measurement. Periodic collection typically means that the application is "inactive", i.e.
    // the marking threads may have received an uncharacteristic amount of cpu time
    // for completing the marking, i.e. are faster than expected.
    // This skews the predicted marking length towards smaller values which might cause
    // the mark start being too late.
    _initial_mark_to_mixed.reset();
  }

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
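
  // Worked example (assuming the defaults MaxGCPauseMillis=200 and
  // G1RSetUpdatingPauseTimePercent=10): the scan goal is 200 ms * 10 / 100 =
  // 20 ms; if merging cards from the hot card cache took 3 ms, the remaining
  // goal handed to concurrent refinement below is 17 ms.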

  if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
                                "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
                                scan_logged_cards_time_goal_ms, merge_hcc_time_ms);

    scan_logged_cards_time_goal_ms = 0;
  } else {
    scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
  }

  double const logged_cards_time = logged_cards_processing_time();

  log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
                              scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);

  _g1h->concurrent_refine()->adjust(logged_cards_time,
                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
                                    scan_logged_cards_time_goal_ms);
}

G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor) {
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     predictor,
                                     G1ReservePercent,
                                     G1HeapWastePercent);
  } else {
    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
  }
}

void G1Policy::update_ihop_prediction(double mutator_time_s,
                                      size_t mutator_alloc_bytes,
                                      size_t young_gen_size,
                                      bool this_gc_was_young_only) {
  // Always try to update the IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

  // Avoid using really small application times that might create samples with
  // very high or very low values. They may be caused by e.g. back-to-back gcs.
  double const min_valid_time = 1e-6;

  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Initial mark to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
      report = true;
    }
  }

  // As an approximation for the young gc promotion rates during marking, we use
  // samples from all young gcs: in many applications there are only a few (if
  // any) young gcs during marking, which would make a prediction based on those
  // samples alone useless. Using all samples increases the accuracy of the
  // prediction.
  if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}

void G1Policy::report_ihop_statistics() {
  _ihop_control->print();
}

void G1Policy::print_phases() {
  phase_times()->print();
}

double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
                                              size_t rs_length) const {
  size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
  return
    _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
    _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
    _analytics->predict_constant_other_time_ms() +
    predict_survivor_regions_evac_time();
}

double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = _analytics->predict_rs_length();
  return predict_base_elapsed_time_ms(pending_cards, rs_length);
}

size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (!hr->is_young()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));
  }
  return bytes_to_copy;
}

double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
  if (count == 0) {
    return 0.0;
  }
  size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes;
  if (bytes_to_copy != NULL) {
    *bytes_to_copy = expected_bytes;
  }
  return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->mark_or_rebuild_in_progress());
}

double G1Policy::predict_region_copy_time_ms(HeapRegion* hr) const {
  size_t const bytes_to_copy = predict_bytes_to_copy(hr);
  return _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
}

double G1Policy::predict_region_non_copy_time_ms(HeapRegion* hr,
                                                 bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  size_t scan_card_num = _analytics->predict_scan_card_num(rs_length, for_young_gc);

  double region_elapsed_time_ms =
    _analytics->predict_card_merge_time_ms(rs_length, collector_state()->in_young_only_phase()) +
    _analytics->predict_card_scan_time_ms(scan_card_num, collector_state()->in_young_only_phase());

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

double G1Policy::predict_region_total_time_ms(HeapRegion* hr, bool for_young_gc) const {
  return predict_region_non_copy_time_ms(hr, for_young_gc) + predict_region_copy_time_ms(hr);
}

bool G1Policy::should_allocate_mutator_region() const {
  uint young_list_length = _g1h->young_regions_count();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length < young_list_target_length;
}

bool G1Policy::can_expand_young_list() const {
  uint young_list_length = _g1h->young_regions_count();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

bool G1Policy::use_adaptive_young_list_length() const {
  return _young_gen_sizer->use_adaptive_young_list_length();
}

size_t G1Policy::desired_survivor_size(uint max_regions) const {
  size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
  return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
}

void G1Policy::print_age_table() {
  _survivors_age_table.print_age_table(_tenuring_threshold);
}

void G1Policy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1Policy::update_survivors_policy() {
  double max_survivor_regions_d =
    (double) _young_list_target_length / (double) SurvivorRatio;

  // Calculate the desired survivor size based on the desired max survivor regions
  // (unconstrained by the remaining heap). Otherwise we may cause undesired
  // promotions as we are already getting close to the end of the heap,
  // impacting performance even more.
  uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
  size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
  if (UsePerfData) {
    _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
  }

  // The real maximum survivor size is bounded by the number of regions that can
  // be allocated into.
  _max_survivor_regions = MIN2(desired_max_survivor_regions,
                               _g1h->num_free_or_available_regions());
}
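
// Worked example (assuming the defaults SurvivorRatio=8 and
// TargetSurvivorRatio=50): with a young target length of 100 regions, the
// desired maximum is ceil(100 / 8) = 13 survivor regions, and the tenuring
// threshold is computed so that survivors fill at most 50% of that capacity.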

bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
  bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
  if (!during_cycle) {
    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
    return false;
  }
}

void G1Policy::initiate_conc_mark() {
  collector_state()->set_in_initial_mark_gc(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1Policy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->in_initial_mark_gc() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->in_initial_mark_gc(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. Or we've been explicitly requested
    // to start a concurrent marking cycle. Either way, we initiate
    // one if not inhibited for some reason.

    GCCause::Cause cause = _g1h->gc_cause();
    if ((cause != GCCause::_wb_breakpoint) &&
        ConcurrentGCBreakpoints::is_controlled()) {
      log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
    } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
      // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
               (cause == GCCause::_wb_breakpoint)) {
      // Initiate a user requested initial mark or run_to a breakpoint.
      // An initial mark must be a young-only GC, so the collector state
      // must be updated to reflect this.
      collector_state()->set_in_young_only_phase(true);
      collector_state()->set_in_young_gc_before_mixed(false);

      // We might have ended up coming here about to start a mixed phase with a collection set
      // active. The following remark might change the "evacuation efficiency" of
      // the regions in this set, leading to failing asserts later.
      // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
      clear_collection_set_candidates();
      abort_time_to_mixed_tracking();
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
                          (cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}

void G1Policy::record_concurrent_mark_cleanup_end() {
  G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
  _collection_set->set_candidates(candidates);

  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
  if (!mixed_gc_pending) {
    clear_collection_set_candidates();
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
  collector_state()->set_mark_or_rebuild_in_progress(false);

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
  return percent_of(reclaimable_bytes, _g1h->capacity());
}

class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    r->rem_set()->clear_locked(true /* only_cardset */);
    return false;
  }
};

void G1Policy::clear_collection_set_candidates() {
  // Clear the remembered sets of the remaining candidate regions and the actual
  // candidate set.
  G1ClearCollectionSetCandidateRemSets cl;
  _collection_set->candidates()->iterate(&cl);
  _collection_set->clear_candidates();
}

void G1Policy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
  assert(!collector_state()->in_full_gc(), "must be");
  if (collector_state()->in_initial_mark_gc()) {
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return InitialMarkGC;
  } else if (collector_state()->in_young_gc_before_mixed()) {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    return LastYoungGC;
  } else if (collector_state()->in_mixed_phase()) {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return MixedGC;
  } else {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return YoungOnlyGC;
  }
}

void G1Policy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed gc.
  switch (kind) {
    case FullGC:
      abort_time_to_mixed_tracking();
      break;
    case Cleanup:
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      _initial_mark_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
      if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
        _initial_mark_to_mixed.record_initial_mark_end(end);
      }
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}

void G1Policy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}

bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
                                       const char* false_action_str) const {
  G1CollectionSetCandidates* candidates = _collection_set->candidates();

  if (candidates == NULL || candidates->is_empty()) {
    if (false_action_str != NULL) {
      log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    }
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
  double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_percent <= threshold) {
    if (false_action_str != NULL) {
      log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                          false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
    }
    return false;
  }
  if (true_action_str != NULL) {
    log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
  }
  return true;
}
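
// Worked example of the waste gate above (assuming the default
// G1HeapWastePercent=5): with 120 MB reclaimable in candidate regions on a
// 2048 MB heap, the reclaimable percentage is about 5.86%, which is above the
// threshold, so mixed GCs continue; at or below 5% they stop.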

uint G1Policy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet candidates in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = _collection_set->candidates()->num_regions();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1Policy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}
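
// Worked example (illustrative numbers, not taken from a real run): with
// 240 candidate regions and G1MixedGCCountTarget = 8, the minimum is
// ceil(240 / 8) = 30 old regions per mixed GC. On a 2048-region heap with
// G1OldCSetRegionThresholdPercent = 10, the maximum is
// ceil(2048 * 10 / 100) = 205 old regions. Note that
// calculate_old_collection_set_regions() below additionally clamps the
// maximum to be at least the minimum.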

void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
                                                    double time_remaining_ms,
                                                    uint& num_initial_regions,
                                                    uint& num_optional_regions) {
  assert(candidates != NULL, "Must be");

  num_initial_regions = 0;
  num_optional_regions = 0;
  uint num_expensive_regions = 0;

  double predicted_initial_time_ms = 0.0;
  double predicted_optional_time_ms = 0.0;

  double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();

  const uint min_old_cset_length = calc_min_old_cset_length();
  const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
  const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
  bool check_time_remaining = use_adaptive_young_list_length();

  uint candidate_idx = candidates->cur_idx();

  log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
                            "time remaining %1.2fms, optional threshold %1.2fms",
                            min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);

  HeapRegion* hr = candidates->at(candidate_idx);
  while (hr != NULL) {
    if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
      // Added maximum number of old regions to the CSet.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
                                "Initial %u regions, optional %u regions",
                                num_initial_regions, num_optional_regions);
      break;
    }

    // Stop adding regions if the remaining reclaimable space is
    // not above G1HeapWastePercent.
    size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
    double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
    double threshold = (double) G1HeapWastePercent;
    if (reclaimable_percent <= threshold) {
      // We've added enough old regions that the amount of uncollected
      // reclaimable space is at or below the waste threshold. Stop
      // adding old regions to the CSet.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
                                "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
                                reclaimable_percent, G1HeapWastePercent);
      break;
    }

    double predicted_time_ms = predict_region_total_time_ms(hr, false /* for_young_gc */);
    time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
    // Add regions to the old set until we reach the minimum amount.
    if (num_initial_regions < min_old_cset_length) {
      predicted_initial_time_ms += predicted_time_ms;
      num_initial_regions++;
      // Record the number of regions added with no time remaining.
      if (time_remaining_ms == 0.0) {
        num_expensive_regions++;
      }
    } else if (!check_time_remaining) {
      // In the non-auto-tuning case, we'll finish adding regions
      // to the CSet once we reach the minimum.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
      break;
    } else {
      // Keep adding regions to the old set until we reach the optional threshold.
      if (time_remaining_ms > optional_threshold_ms) {
        predicted_initial_time_ms += predicted_time_ms;
        num_initial_regions++;
      } else if (time_remaining_ms > 0) {
        // Keep adding optional regions until time is up.
        assert(num_optional_regions < max_optional_regions, "Should not be possible.");
        predicted_optional_time_ms += predicted_time_ms;
        num_optional_regions++;
      } else {
        log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
        break;
      }
    }
    hr = candidates->at(++candidate_idx);
  }
  if (hr == NULL) {
    log_debug(gc, ergo, cset)("Old candidate collection set empty.");
  }

  if (num_expensive_regions > 0) {
    log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
                              num_expensive_regions);
  }

  log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
                            "predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2fms",
                            num_initial_regions, num_optional_regions,
                            predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
}
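
// Walk-through of the time budget above (illustrative numbers, not taken
// from a real run): with time_remaining_ms = 100 and an optional prediction
// fraction of 0.2, optional_threshold_ms is 20. The first
// min_old_cset_length regions are always taken as initial regions, counted
// as "expensive" if the budget is already exhausted. After that, regions
// picked while more than 20 ms remain become initial regions, regions
// picked while 0 < time_remaining_ms <= 20 become optional regions, and
// selection stops once the budget reaches zero.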

void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
                                                         uint const max_optional_regions,
                                                         double time_remaining_ms,
                                                         uint& num_optional_regions) {
  assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");

  num_optional_regions = 0;
  double prediction_ms = 0;
  uint candidate_idx = candidates->cur_idx();

  HeapRegion* r = candidates->at(candidate_idx);
  while (num_optional_regions < max_optional_regions) {
    assert(r != NULL, "Region must exist");
    // Compare the cumulative prediction for all regions taken so far
    // against the (fixed) remaining time budget.
    prediction_ms += predict_region_total_time_ms(r, false /* for_young_gc */);

    if (prediction_ms > time_remaining_ms) {
      log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
                                prediction_ms, r->hrm_index(), time_remaining_ms);
      break;
    }
    // This region will be included in the next optional evacuation.

    num_optional_regions++;
    r = candidates->at(++candidate_idx);
  }

  log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
                            num_optional_regions, max_optional_regions, prediction_ms);
}

void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
  note_start_adding_survivor_regions();

  for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
       it != survivors->regions()->end();
       ++it) {
    HeapRegion* curr = *it;
    set_region_survivor(curr);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _collection_set->add_survivor_regions(curr);
  }
  note_stop_adding_survivor_regions();

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.
}
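
// Usage sketch (an assumption for illustration - the caller lives outside
// this file): the heap is expected to invoke something like
//   policy()->transfer_survivors_to_cset(survivor());
// when it starts building the next incremental collection set, so that
// this pause's survivor regions are evacuated again in the next pause.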