1 /* 2 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/g1Analytics.hpp" 27 #include "gc/g1/g1Arguments.hpp" 28 #include "gc/g1/g1CollectedHeap.inline.hpp" 29 #include "gc/g1/g1CollectionSet.hpp" 30 #include "gc/g1/g1CollectionSetCandidates.hpp" 31 #include "gc/g1/g1ConcurrentMark.hpp" 32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" 33 #include "gc/g1/g1ConcurrentRefine.hpp" 34 #include "gc/g1/g1ConcurrentRefineStats.hpp" 35 #include "gc/g1/g1CollectionSetChooser.hpp" 36 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp" 37 #include "gc/g1/g1HotCardCache.hpp" 38 #include "gc/g1/g1IHOPControl.hpp" 39 #include "gc/g1/g1GCPhaseTimes.hpp" 40 #include "gc/g1/g1Policy.hpp" 41 #include "gc/g1/g1SurvivorRegions.hpp" 42 #include "gc/g1/g1YoungGenSizer.hpp" 43 #include "gc/g1/heapRegion.inline.hpp" 44 #include "gc/g1/heapRegionRemSet.hpp" 45 #include "gc/shared/concurrentGCBreakpoints.hpp" 46 #include "gc/shared/gcPolicyCounters.hpp" 47 #include "logging/log.hpp" 48 #include "runtime/arguments.hpp" 49 #include "runtime/java.hpp" 50 #include "runtime/mutexLocker.hpp" 51 #include "utilities/debug.hpp" 52 #include "utilities/growableArray.hpp" 53 #include "utilities/pair.hpp" 54 55 G1Policy::G1Policy(STWGCTimer* gc_timer) : 56 _predictor(G1ConfidencePercent / 100.0), 57 _analytics(new G1Analytics(&_predictor)), 58 _remset_tracker(), 59 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)), 60 _ihop_control(create_ihop_control(&_predictor)), 61 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)), 62 _full_collection_start_sec(0.0), 63 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC), 64 _young_list_target_length(0), 65 _young_list_fixed_length(0), 66 _young_list_max_length(0), 67 _eden_surv_rate_group(new G1SurvRateGroup()), 68 _survivor_surv_rate_group(new G1SurvRateGroup()), 69 _reserve_factor((double) G1ReservePercent / 100.0), 70 _reserve_regions(0), 71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()), 72 _free_regions_at_end_of_collection(0), 73 _rs_length(0), 74 _rs_length_prediction(0), 75 _pending_cards_at_gc_start(0), 76 _old_gen_alloc_tracker(), 77 _initial_mark_to_mixed(), 78 _collection_set(NULL), 79 _g1h(NULL), 80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)), 81 _mark_remark_start_sec(0), 82 _mark_cleanup_start_sec(0), 83 _tenuring_threshold(MaxTenuringThreshold), 84 _max_survivor_regions(0), 85 _survivors_age_table(true) 86 { 87 } 88 89 G1Policy::~G1Policy() { 90 delete _ihop_control; 91 delete _young_gen_sizer; 92 } 93 94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) { 95 if (G1Arguments::is_heterogeneous_heap()) { 96 return new G1HeterogeneousHeapPolicy(gc_timer_stw); 97 } else { 98 return new G1Policy(gc_timer_stw); 99 } 100 } 101 102 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); } 103 104 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) { 105 _g1h = g1h; 106 _collection_set = collection_set; 107 108 assert(Heap_lock->owned_by_self(), "Locking discipline."); 109 110 if (!use_adaptive_young_list_length()) { 111 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); 112 } 113 _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions()); 114 115 _free_regions_at_end_of_collection = _g1h->num_free_regions(); 116 117 update_young_list_max_and_target_length(); 118 // We may immediately start allocating regions and placing them on the 119 // collection set list. Initialize the per-collection set info 120 _collection_set->start_incremental_building(); 121 } 122 123 void G1Policy::note_gc_start() { 124 phase_times()->note_gc_start(); 125 } 126 127 class G1YoungLengthPredictor { 128 const double _base_time_ms; 129 const double _base_free_regions; 130 const double _target_pause_time_ms; 131 const G1Policy* const _policy; 132 133 public: 134 G1YoungLengthPredictor(double base_time_ms, 135 double base_free_regions, 136 double target_pause_time_ms, 137 const G1Policy* policy) : 138 _base_time_ms(base_time_ms), 139 _base_free_regions(base_free_regions), 140 _target_pause_time_ms(target_pause_time_ms), 141 _policy(policy) {} 142 143 bool will_fit(uint young_length) const { 144 if (young_length >= _base_free_regions) { 145 // end condition 1: not enough space for the young regions 146 return false; 147 } 148 149 size_t bytes_to_copy = 0; 150 const double copy_time_ms = _policy->predict_eden_copy_time_ms(young_length, &bytes_to_copy); 151 const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length); 152 const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms; 153 if (pause_time_ms > _target_pause_time_ms) { 154 // end condition 2: prediction is over the target pause time 155 return false; 156 } 157 158 const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes; 159 160 // When copying, we will likely need more bytes free than is live in the region. 161 // Add some safety margin to factor in the confidence of our guess, and the 162 // natural expected waste. 163 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty 164 // of the calculation: the lower the confidence, the more headroom. 165 // (100 + TargetPLABWastePct) represents the increase in expected bytes during 166 // copying due to anticipated waste in the PLABs. 167 const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0; 168 const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy); 169 170 if (expected_bytes_to_copy > free_bytes) { 171 // end condition 3: out-of-space 172 return false; 173 } 174 175 // success! 176 return true; 177 } 178 }; 179 180 void G1Policy::record_new_heap_size(uint new_number_of_regions) { 181 // re-calculate the necessary reserve 182 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; 183 // We use ceiling so that if reserve_regions_d is > 0.0 (but 184 // smaller than 1.0) we'll get 1. 185 _reserve_regions = (uint) ceil(reserve_regions_d); 186 187 _young_gen_sizer->heap_size_changed(new_number_of_regions); 188 189 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes); 190 } 191 192 uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const { 193 uint desired_min_length = 0; 194 if (use_adaptive_young_list_length()) { 195 if (_analytics->num_alloc_rate_ms() > 3) { 196 double now_sec = os::elapsedTime(); 197 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 198 double alloc_rate_ms = _analytics->predict_alloc_rate_ms(); 199 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); 200 } else { 201 // otherwise we don't have enough info to make the prediction 202 } 203 } 204 desired_min_length += base_min_length; 205 // make sure we don't go below any user-defined minimum bound 206 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); 207 } 208 209 uint G1Policy::calculate_young_list_desired_max_length() const { 210 // Here, we might want to also take into account any additional 211 // constraints (i.e., user-defined minimum bound). Currently, we 212 // effectively don't set this bound. 213 return _young_gen_sizer->max_desired_young_length(); 214 } 215 216 uint G1Policy::update_young_list_max_and_target_length() { 217 return update_young_list_max_and_target_length(_analytics->predict_rs_length()); 218 } 219 220 uint G1Policy::update_young_list_max_and_target_length(size_t rs_length) { 221 uint unbounded_target_length = update_young_list_target_length(rs_length); 222 update_max_gc_locker_expansion(); 223 return unbounded_target_length; 224 } 225 226 uint G1Policy::update_young_list_target_length(size_t rs_length) { 227 YoungTargetLengths young_lengths = young_list_target_lengths(rs_length); 228 _young_list_target_length = young_lengths.first; 229 230 return young_lengths.second; 231 } 232 233 G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_length) const { 234 YoungTargetLengths result; 235 236 // Calculate the absolute and desired min bounds first. 237 238 // This is how many young regions we already have (currently: the survivors). 239 const uint base_min_length = _g1h->survivor_regions_count(); 240 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); 241 // This is the absolute minimum young length. Ensure that we 242 // will at least have one eden region available for allocation. 243 uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1); 244 // If we shrank the young list target it should not shrink below the current size. 245 desired_min_length = MAX2(desired_min_length, absolute_min_length); 246 // Calculate the absolute and desired max bounds. 247 248 uint desired_max_length = calculate_young_list_desired_max_length(); 249 250 uint young_list_target_length = 0; 251 if (use_adaptive_young_list_length()) { 252 if (collector_state()->in_young_only_phase()) { 253 young_list_target_length = 254 calculate_young_list_target_length(rs_length, 255 base_min_length, 256 desired_min_length, 257 desired_max_length); 258 } else { 259 // Don't calculate anything and let the code below bound it to 260 // the desired_min_length, i.e., do the next GC as soon as 261 // possible to maximize how many old regions we can add to it. 262 } 263 } else { 264 // The user asked for a fixed young gen so we'll fix the young gen 265 // whether the next GC is young or mixed. 266 young_list_target_length = _young_list_fixed_length; 267 } 268 269 result.second = young_list_target_length; 270 271 // We will try our best not to "eat" into the reserve. 272 uint absolute_max_length = 0; 273 if (_free_regions_at_end_of_collection > _reserve_regions) { 274 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; 275 } 276 if (desired_max_length > absolute_max_length) { 277 desired_max_length = absolute_max_length; 278 } 279 280 // Make sure we don't go over the desired max length, nor under the 281 // desired min length. In case they clash, desired_min_length wins 282 // which is why that test is second. 283 if (young_list_target_length > desired_max_length) { 284 young_list_target_length = desired_max_length; 285 } 286 if (young_list_target_length < desired_min_length) { 287 young_list_target_length = desired_min_length; 288 } 289 290 assert(young_list_target_length > base_min_length, 291 "we should be able to allocate at least one eden region"); 292 assert(young_list_target_length >= absolute_min_length, "post-condition"); 293 294 result.first = young_list_target_length; 295 return result; 296 } 297 298 uint G1Policy::calculate_young_list_target_length(size_t rs_length, 299 uint base_min_length, 300 uint desired_min_length, 301 uint desired_max_length) const { 302 assert(use_adaptive_young_list_length(), "pre-condition"); 303 assert(collector_state()->in_young_only_phase(), "only call this for young GCs"); 304 305 // In case some edge-condition makes the desired max length too small... 306 if (desired_max_length <= desired_min_length) { 307 return desired_min_length; 308 } 309 310 // We'll adjust min_young_length and max_young_length not to include 311 // the already allocated young regions (i.e., so they reflect the 312 // min and max eden regions we'll allocate). The base_min_length 313 // will be reflected in the predictions by the 314 // survivor_regions_evac_time prediction. 315 assert(desired_min_length > base_min_length, "invariant"); 316 uint min_young_length = desired_min_length - base_min_length; 317 assert(desired_max_length > base_min_length, "invariant"); 318 uint max_young_length = desired_max_length - base_min_length; 319 320 const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 321 const size_t pending_cards = _analytics->predict_pending_cards(); 322 const double base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length); 323 const uint available_free_regions = _free_regions_at_end_of_collection; 324 const uint base_free_regions = 325 available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0; 326 327 // Here, we will make sure that the shortest young length that 328 // makes sense fits within the target pause time. 329 330 G1YoungLengthPredictor p(base_time_ms, 331 base_free_regions, 332 target_pause_time_ms, 333 this); 334 if (p.will_fit(min_young_length)) { 335 // The shortest young length will fit into the target pause time; 336 // we'll now check whether the absolute maximum number of young 337 // regions will fit in the target pause time. If not, we'll do 338 // a binary search between min_young_length and max_young_length. 339 if (p.will_fit(max_young_length)) { 340 // The maximum young length will fit into the target pause time. 341 // We are done so set min young length to the maximum length (as 342 // the result is assumed to be returned in min_young_length). 343 min_young_length = max_young_length; 344 } else { 345 // The maximum possible number of young regions will not fit within 346 // the target pause time so we'll search for the optimal 347 // length. The loop invariants are: 348 // 349 // min_young_length < max_young_length 350 // min_young_length is known to fit into the target pause time 351 // max_young_length is known not to fit into the target pause time 352 // 353 // Going into the loop we know the above hold as we've just 354 // checked them. Every time around the loop we check whether 355 // the middle value between min_young_length and 356 // max_young_length fits into the target pause time. If it 357 // does, it becomes the new min. If it doesn't, it becomes 358 // the new max. This way we maintain the loop invariants. 359 360 assert(min_young_length < max_young_length, "invariant"); 361 uint diff = (max_young_length - min_young_length) / 2; 362 while (diff > 0) { 363 uint young_length = min_young_length + diff; 364 if (p.will_fit(young_length)) { 365 min_young_length = young_length; 366 } else { 367 max_young_length = young_length; 368 } 369 assert(min_young_length < max_young_length, "invariant"); 370 diff = (max_young_length - min_young_length) / 2; 371 } 372 // The results is min_young_length which, according to the 373 // loop invariants, should fit within the target pause time. 374 375 // These are the post-conditions of the binary search above: 376 assert(min_young_length < max_young_length, 377 "otherwise we should have discovered that max_young_length " 378 "fits into the pause target and not done the binary search"); 379 assert(p.will_fit(min_young_length), 380 "min_young_length, the result of the binary search, should " 381 "fit into the pause target"); 382 assert(!p.will_fit(min_young_length + 1), 383 "min_young_length, the result of the binary search, should be " 384 "optimal, so no larger length should fit into the pause target"); 385 } 386 } else { 387 // Even the minimum length doesn't fit into the pause time 388 // target, return it as the result nevertheless. 389 } 390 return base_min_length + min_young_length; 391 } 392 393 double G1Policy::predict_survivor_regions_evac_time() const { 394 double survivor_regions_evac_time = 0.0; 395 const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions(); 396 for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin(); 397 it != survivor_regions->end(); 398 ++it) { 399 survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase()); 400 } 401 return survivor_regions_evac_time; 402 } 403 404 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) { 405 guarantee(use_adaptive_young_list_length(), "should not call this otherwise" ); 406 407 if (rs_length > _rs_length_prediction) { 408 // add 10% to avoid having to recalculate often 409 size_t rs_length_prediction = rs_length * 1100 / 1000; 410 update_rs_length_prediction(rs_length_prediction); 411 412 update_young_list_max_and_target_length(rs_length_prediction); 413 } 414 } 415 416 void G1Policy::update_rs_length_prediction() { 417 update_rs_length_prediction(_analytics->predict_rs_length()); 418 } 419 420 void G1Policy::update_rs_length_prediction(size_t prediction) { 421 if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) { 422 _rs_length_prediction = prediction; 423 } 424 } 425 426 void G1Policy::record_full_collection_start() { 427 _full_collection_start_sec = os::elapsedTime(); 428 // Release the future to-space so that it is available for compaction into. 429 collector_state()->set_in_young_only_phase(false); 430 collector_state()->set_in_full_gc(true); 431 _collection_set->clear_candidates(); 432 _pending_cards_at_gc_start = 0; 433 } 434 435 void G1Policy::record_full_collection_end() { 436 // Consider this like a collection pause for the purposes of allocation 437 // since last pause. 438 double end_sec = os::elapsedTime(); 439 double full_gc_time_sec = end_sec - _full_collection_start_sec; 440 double full_gc_time_ms = full_gc_time_sec * 1000.0; 441 442 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms); 443 444 collector_state()->set_in_full_gc(false); 445 446 // "Nuke" the heuristics that control the young/mixed GC 447 // transitions and make sure we start with young GCs after the Full GC. 448 collector_state()->set_in_young_only_phase(true); 449 collector_state()->set_in_young_gc_before_mixed(false); 450 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); 451 collector_state()->set_in_initial_mark_gc(false); 452 collector_state()->set_mark_or_rebuild_in_progress(false); 453 collector_state()->set_clearing_next_bitmap(false); 454 455 _eden_surv_rate_group->start_adding_regions(); 456 // also call this on any additional surv rate groups 457 458 _free_regions_at_end_of_collection = _g1h->num_free_regions(); 459 _survivor_surv_rate_group->reset(); 460 update_young_list_max_and_target_length(); 461 update_rs_length_prediction(); 462 463 _old_gen_alloc_tracker.reset_after_full_gc(); 464 465 record_pause(FullGC, _full_collection_start_sec, end_sec); 466 } 467 468 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) { 469 log_debug(gc, refine, stats) 470 ("%s refinement: %.2fms, refined: " SIZE_FORMAT 471 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT, 472 kind, 473 stats.refinement_time().seconds() * MILLIUNITS, 474 stats.refined_cards(), 475 stats.precleaned_cards(), 476 stats.dirtied_cards()); 477 } 478 479 void G1Policy::record_concurrent_refinement_stats() { 480 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); 481 _pending_cards_at_gc_start = dcqs.num_cards(); 482 483 // Collect per-thread stats, mostly from mutator activity. 484 G1ConcurrentRefineStats mut_stats = dcqs.get_and_reset_refinement_stats(); 485 486 // Collect specialized concurrent refinement thread stats. 487 G1ConcurrentRefine* cr = _g1h->concurrent_refine(); 488 G1ConcurrentRefineStats cr_stats = cr->get_and_reset_refinement_stats(); 489 490 G1ConcurrentRefineStats total_stats = mut_stats + cr_stats; 491 492 log_refinement_stats("Mutator", mut_stats); 493 log_refinement_stats("Concurrent", cr_stats); 494 log_refinement_stats("Total", total_stats); 495 496 // Record the rate at which cards were refined. 497 // Don't update the rate if the current sample is empty or time is zero. 498 Tickspan refinement_time = total_stats.refinement_time(); 499 size_t refined_cards = total_stats.refined_cards(); 500 if ((refined_cards > 0) && (refinement_time > Tickspan())) { 501 double rate = refined_cards / (refinement_time.seconds() * MILLIUNITS); 502 _analytics->report_concurrent_refine_rate_ms(rate); 503 log_debug(gc, refine, stats)("Concurrent refinement rate: %.2f cards/ms", rate); 504 } 505 506 // Record mutator's card logging rate. 507 double mut_start_time = _analytics->prev_collection_pause_end_ms(); 508 double mut_end_time = phase_times()->cur_collection_start_sec() * MILLIUNITS; 509 double mut_time = mut_end_time - mut_start_time; 510 // Unlike above for conc-refine rate, here we should not require a 511 // non-empty sample, since an application could go some time with only 512 // young-gen or filtered out writes. But we'll ignore unusually short 513 // sample periods, as they may just pollute the predictions. 514 if (mut_time > 1.0) { // Require > 1ms sample time. 515 double dirtied_rate = total_stats.dirtied_cards() / mut_time; 516 _analytics->report_dirtied_cards_rate_ms(dirtied_rate); 517 log_debug(gc, refine, stats)("Generate dirty cards rate: %.2f cards/ms", dirtied_rate); 518 } 519 } 520 521 void G1Policy::record_collection_pause_start(double start_time_sec) { 522 // We only need to do this here as the policy will only be applied 523 // to the GC we're about to start. so, no point is calculating this 524 // every time we calculate / recalculate the target young length. 525 update_survivors_policy(); 526 527 assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(), 528 "Maximum survivor regions %u plus used regions %u exceeds max regions %u", 529 max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions()); 530 assert_used_and_recalculate_used_equal(_g1h); 531 532 phase_times()->record_cur_collection_start_sec(start_time_sec); 533 534 record_concurrent_refinement_stats(); 535 536 _collection_set->reset_bytes_used_before(); 537 538 // do that for any other surv rate groups 539 _eden_surv_rate_group->stop_adding_regions(); 540 _survivors_age_table.clear(); 541 542 assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed"); 543 } 544 545 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) { 546 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); 547 collector_state()->set_in_initial_mark_gc(false); 548 } 549 550 void G1Policy::record_concurrent_mark_remark_start() { 551 _mark_remark_start_sec = os::elapsedTime(); 552 } 553 554 void G1Policy::record_concurrent_mark_remark_end() { 555 double end_time_sec = os::elapsedTime(); 556 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 557 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms); 558 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms); 559 560 record_pause(Remark, _mark_remark_start_sec, end_time_sec); 561 } 562 563 void G1Policy::record_concurrent_mark_cleanup_start() { 564 _mark_cleanup_start_sec = os::elapsedTime(); 565 } 566 567 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { 568 return phase_times()->average_time_ms(phase); 569 } 570 571 double G1Policy::young_other_time_ms() const { 572 return phase_times()->young_cset_choice_time_ms() + 573 phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet); 574 } 575 576 double G1Policy::non_young_other_time_ms() const { 577 return phase_times()->non_young_cset_choice_time_ms() + 578 phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet); 579 } 580 581 double G1Policy::other_time_ms(double pause_time_ms) const { 582 return pause_time_ms - phase_times()->cur_collection_par_time_ms(); 583 } 584 585 double G1Policy::constant_other_time_ms(double pause_time_ms) const { 586 return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms() - phase_times()->total_rebuild_freelist_time_ms(); 587 } 588 589 bool G1Policy::about_to_start_mixed_phase() const { 590 return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed(); 591 } 592 593 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { 594 if (about_to_start_mixed_phase()) { 595 return false; 596 } 597 598 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); 599 600 size_t cur_used_bytes = _g1h->non_young_capacity_bytes(); 601 size_t alloc_byte_size = alloc_word_size * HeapWordSize; 602 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; 603 604 bool result = false; 605 if (marking_request_bytes > marking_initiating_used_threshold) { 606 result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed(); 607 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s", 608 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", 609 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source); 610 } 611 612 return result; 613 } 614 615 double G1Policy::logged_cards_processing_time() const { 616 double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR); 617 size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards); 618 size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) + 619 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards); 620 // This may happen if there are duplicate cards in different log buffers. 621 if (logged_dirty_cards > scan_heap_roots_cards) { 622 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB); 623 } 624 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB); 625 } 626 627 // Anything below that is considered to be zero 628 #define MIN_TIMER_GRANULARITY 0.0000001 629 630 void G1Policy::record_collection_pause_end(double pause_time_ms) { 631 G1GCPhaseTimes* p = phase_times(); 632 633 double end_time_sec = os::elapsedTime(); 634 635 bool this_pause_included_initial_mark = false; 636 bool this_pause_was_young_only = collector_state()->in_young_only_phase(); 637 638 bool update_stats = !_g1h->evacuation_failed(); 639 640 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 641 642 _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 643 644 this_pause_included_initial_mark = collector_state()->in_initial_mark_gc(); 645 if (this_pause_included_initial_mark) { 646 record_concurrent_mark_init_end(0.0); 647 } else { 648 maybe_start_marking(); 649 } 650 651 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms()); 652 if (app_time_ms < MIN_TIMER_GRANULARITY) { 653 // This usually happens due to the timer not having the required 654 // granularity. Some Linuxes are the usual culprits. 655 // We'll just set it to something (arbitrarily) small. 656 app_time_ms = 1.0; 657 } 658 659 if (update_stats) { 660 // We maintain the invariant that all objects allocated by mutator 661 // threads will be allocated out of eden regions. So, we can use 662 // the eden region number allocated since the previous GC to 663 // calculate the application's allocate rate. The only exception 664 // to that is humongous objects that are allocated separately. But 665 // given that humongous object allocations do not really affect 666 // either the pause's duration nor when the next pause will take 667 // place we can safely ignore them here. 668 uint regions_allocated = _collection_set->eden_region_length(); 669 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 670 _analytics->report_alloc_rate_ms(alloc_rate_ms); 671 672 _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms); 673 _analytics->update_recent_gc_times(end_time_sec, pause_time_ms); 674 } 675 676 if (collector_state()->in_young_gc_before_mixed()) { 677 assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC"); 678 // This has been the young GC before we start doing mixed GCs. We already 679 // decided to start mixed GCs much earlier, so there is nothing to do except 680 // advancing the state. 681 collector_state()->set_in_young_only_phase(false); 682 collector_state()->set_in_young_gc_before_mixed(false); 683 } else if (!this_pause_was_young_only) { 684 // This is a mixed GC. Here we decide whether to continue doing more 685 // mixed GCs or not. 686 if (!next_gc_should_be_mixed("continue mixed GCs", 687 "do not continue mixed GCs")) { 688 collector_state()->set_in_young_only_phase(true); 689 690 clear_collection_set_candidates(); 691 maybe_start_marking(); 692 } 693 } 694 695 _eden_surv_rate_group->start_adding_regions(); 696 697 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC); 698 if (update_stats) { 699 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) + 700 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards); 701 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase. 702 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) + 703 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) + 704 total_log_buffer_cards; 705 706 // The threshold for the number of cards in a given sampling which we consider 707 // large enough so that the impact from setup and other costs is negligible. 708 size_t const CardsNumSamplingThreshold = 10; 709 710 if (total_cards_merged > CardsNumSamplingThreshold) { 711 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) + 712 average_time_ms(G1GCPhaseTimes::MergeRS) + 713 average_time_ms(G1GCPhaseTimes::MergeHCC) + 714 average_time_ms(G1GCPhaseTimes::MergeLB) + 715 average_time_ms(G1GCPhaseTimes::OptMergeRS); 716 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only); 717 } 718 719 // Update prediction for card scan 720 size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) + 721 p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards); 722 723 if (total_cards_scanned > CardsNumSamplingThreshold) { 724 double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) + 725 average_time_ms(G1GCPhaseTimes::OptScanHR); 726 727 _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only); 728 } 729 730 // Update prediction for the ratio between cards from the remembered 731 // sets and actually scanned cards from the remembered sets. 732 // Cards from the remembered sets are all cards not duplicated by cards from 733 // the logs. 734 // Due to duplicates in the log buffers, the number of actually scanned cards 735 // can be smaller than the cards in the log buffers. 736 const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0; 737 double merge_to_scan_ratio = 0.0; 738 if (total_cards_scanned > 0) { 739 merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned; 740 } 741 _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only); 742 743 const size_t recorded_rs_length = _collection_set->recorded_rs_length(); 744 const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0; 745 _analytics->report_rs_length_diff(rs_length_diff); 746 747 // Update prediction for copy cost per byte 748 size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes); 749 750 if (copied_bytes > 0) { 751 double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes; 752 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress()); 753 } 754 755 if (_collection_set->young_region_length() > 0) { 756 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() / 757 _collection_set->young_region_length()); 758 } 759 760 if (_collection_set->old_region_length() > 0) { 761 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() / 762 _collection_set->old_region_length()); 763 } 764 765 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms)); 766 767 // Do not update RS lengths and the number of pending cards with information from mixed gc: 768 // these are is wildly different to during young only gc and mess up young gen sizing right 769 // after the mixed gc phase. 770 // During mixed gc we do not use them for young gen sizing. 771 if (this_pause_was_young_only) { 772 _analytics->report_pending_cards((double) _pending_cards_at_gc_start); 773 _analytics->report_rs_length((double) _rs_length); 774 } 775 } 776 777 assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()), 778 "If the last pause has been an initial mark, we should not have been in the marking window"); 779 if (this_pause_included_initial_mark) { 780 collector_state()->set_mark_or_rebuild_in_progress(true); 781 } 782 783 _free_regions_at_end_of_collection = _g1h->num_free_regions(); 784 785 update_rs_length_prediction(); 786 787 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely 788 // that in this case we are not running in a "normal" operating mode. 789 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { 790 // IHOP control wants to know the expected young gen length if it were not 791 // restrained by the heap reserve. Using the actual length would make the 792 // prediction too small and the limit the young gen every time we get to the 793 // predicted target occupancy. 794 size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); 795 796 _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0); 797 update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(), 798 _old_gen_alloc_tracker.last_cycle_old_bytes(), 799 last_unrestrained_young_length * HeapRegion::GrainBytes, 800 this_pause_was_young_only); 801 802 _ihop_control->send_trace_event(_g1h->gc_tracer_stw()); 803 } else { 804 // Any garbage collection triggered as periodic collection resets the time-to-mixed 805 // measurement. Periodic collection typically means that the application is "inactive", i.e. 806 // the marking threads may have received an uncharacterisic amount of cpu time 807 // for completing the marking, i.e. are faster than expected. 808 // This skews the predicted marking length towards smaller values which might cause 809 // the mark start being too late. 810 _initial_mark_to_mixed.reset(); 811 } 812 813 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 814 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 815 816 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) { 817 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)." 818 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms", 819 scan_logged_cards_time_goal_ms, merge_hcc_time_ms); 820 821 scan_logged_cards_time_goal_ms = 0; 822 } else { 823 scan_logged_cards_time_goal_ms -= merge_hcc_time_ms; 824 } 825 826 double const logged_cards_time = logged_cards_processing_time(); 827 828 log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms", 829 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms); 830 831 _g1h->concurrent_refine()->adjust(logged_cards_time, 832 phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards), 833 scan_logged_cards_time_goal_ms); 834 } 835 836 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){ 837 if (G1UseAdaptiveIHOP) { 838 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, 839 predictor, 840 G1ReservePercent, 841 G1HeapWastePercent); 842 } else { 843 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); 844 } 845 } 846 847 void G1Policy::update_ihop_prediction(double mutator_time_s, 848 size_t mutator_alloc_bytes, 849 size_t young_gen_size, 850 bool this_gc_was_young_only) { 851 // Always try to update IHOP prediction. Even evacuation failures give information 852 // about e.g. whether to start IHOP earlier next time. 853 854 // Avoid using really small application times that might create samples with 855 // very high or very low values. They may be caused by e.g. back-to-back gcs. 856 double const min_valid_time = 1e-6; 857 858 bool report = false; 859 860 double marking_to_mixed_time = -1.0; 861 if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) { 862 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); 863 assert(marking_to_mixed_time > 0.0, 864 "Initial mark to mixed time must be larger than zero but is %.3f", 865 marking_to_mixed_time); 866 if (marking_to_mixed_time > min_valid_time) { 867 _ihop_control->update_marking_length(marking_to_mixed_time); 868 report = true; 869 } 870 } 871 872 // As an approximation for the young gc promotion rates during marking we use 873 // all of them. In many applications there are only a few if any young gcs during 874 // marking, which makes any prediction useless. This increases the accuracy of the 875 // prediction. 876 if (this_gc_was_young_only && mutator_time_s > min_valid_time) { 877 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); 878 report = true; 879 } 880 881 if (report) { 882 report_ihop_statistics(); 883 } 884 } 885 886 void G1Policy::report_ihop_statistics() { 887 _ihop_control->print(); 888 } 889 890 void G1Policy::print_phases() { 891 phase_times()->print(); 892 } 893 894 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards, 895 size_t rs_length) const { 896 size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase()); 897 return 898 _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) + 899 _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) + 900 _analytics->predict_constant_other_time_ms() + 901 predict_survivor_regions_evac_time(); 902 } 903 904 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const { 905 size_t rs_length = _analytics->predict_rs_length(); 906 return predict_base_elapsed_time_ms(pending_cards, rs_length); 907 } 908 909 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const { 910 size_t bytes_to_copy; 911 if (!hr->is_young()) { 912 bytes_to_copy = hr->max_live_bytes(); 913 } else { 914 bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor)); 915 } 916 return bytes_to_copy; 917 } 918 919 double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const { 920 if (count == 0) { 921 return 0.0; 922 } 923 size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes; 924 if (bytes_to_copy != NULL) { 925 *bytes_to_copy = expected_bytes; 926 } 927 return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->mark_or_rebuild_in_progress()); 928 } 929 930 double G1Policy::predict_region_copy_time_ms(HeapRegion* hr) const { 931 size_t const bytes_to_copy = predict_bytes_to_copy(hr); 932 return _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress()); 933 } 934 935 double G1Policy::predict_region_non_copy_time_ms(HeapRegion* hr, 936 bool for_young_gc) const { 937 size_t rs_length = hr->rem_set()->occupied(); 938 size_t scan_card_num = _analytics->predict_scan_card_num(rs_length, for_young_gc); 939 940 double region_elapsed_time_ms = 941 _analytics->predict_card_merge_time_ms(rs_length, collector_state()->in_young_only_phase()) + 942 _analytics->predict_card_scan_time_ms(scan_card_num, collector_state()->in_young_only_phase()); 943 944 // The prediction of the "other" time for this region is based 945 // upon the region type and NOT the GC type. 946 if (hr->is_young()) { 947 region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1); 948 } else { 949 region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1); 950 } 951 return region_elapsed_time_ms; 952 } 953 954 double G1Policy::predict_region_total_time_ms(HeapRegion* hr, bool for_young_gc) const { 955 return predict_region_non_copy_time_ms(hr, for_young_gc) + predict_region_copy_time_ms(hr); 956 } 957 958 bool G1Policy::should_allocate_mutator_region() const { 959 uint young_list_length = _g1h->young_regions_count(); 960 uint young_list_target_length = _young_list_target_length; 961 return young_list_length < young_list_target_length; 962 } 963 964 bool G1Policy::can_expand_young_list() const { 965 uint young_list_length = _g1h->young_regions_count(); 966 uint young_list_max_length = _young_list_max_length; 967 return young_list_length < young_list_max_length; 968 } 969 970 bool G1Policy::use_adaptive_young_list_length() const { 971 return _young_gen_sizer->use_adaptive_young_list_length(); 972 } 973 974 size_t G1Policy::desired_survivor_size(uint max_regions) const { 975 size_t const survivor_capacity = HeapRegion::GrainWords * max_regions; 976 return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100); 977 } 978 979 void G1Policy::print_age_table() { 980 _survivors_age_table.print_age_table(_tenuring_threshold); 981 } 982 983 void G1Policy::update_max_gc_locker_expansion() { 984 uint expansion_region_num = 0; 985 if (GCLockerEdenExpansionPercent > 0) { 986 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 987 double expansion_region_num_d = perc * (double) _young_list_target_length; 988 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 989 // less than 1.0) we'll get 1. 990 expansion_region_num = (uint) ceil(expansion_region_num_d); 991 } else { 992 assert(expansion_region_num == 0, "sanity"); 993 } 994 _young_list_max_length = _young_list_target_length + expansion_region_num; 995 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 996 } 997 998 // Calculates survivor space parameters. 999 void G1Policy::update_survivors_policy() { 1000 double max_survivor_regions_d = 1001 (double) _young_list_target_length / (double) SurvivorRatio; 1002 1003 // Calculate desired survivor size based on desired max survivor regions (unconstrained 1004 // by remaining heap). Otherwise we may cause undesired promotions as we are 1005 // already getting close to end of the heap, impacting performance even more. 1006 uint const desired_max_survivor_regions = ceil(max_survivor_regions_d); 1007 size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions); 1008 1009 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size); 1010 if (UsePerfData) { 1011 _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold); 1012 _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize); 1013 } 1014 // The real maximum survivor size is bounded by the number of regions that can 1015 // be allocated into. 1016 _max_survivor_regions = MIN2(desired_max_survivor_regions, 1017 _g1h->num_free_or_available_regions()); 1018 } 1019 1020 bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { 1021 // We actually check whether we are marking here and not if we are in a 1022 // reclamation phase. This means that we will schedule a concurrent mark 1023 // even while we are still in the process of reclaiming memory. 1024 bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle(); 1025 if (!during_cycle) { 1026 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause)); 1027 collector_state()->set_initiate_conc_mark_if_possible(true); 1028 return true; 1029 } else { 1030 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause)); 1031 return false; 1032 } 1033 } 1034 1035 void G1Policy::initiate_conc_mark() { 1036 collector_state()->set_in_initial_mark_gc(true); 1037 collector_state()->set_initiate_conc_mark_if_possible(false); 1038 } 1039 1040 void G1Policy::decide_on_conc_mark_initiation() { 1041 // We are about to decide on whether this pause will be an 1042 // initial-mark pause. 1043 1044 // First, collector_state()->in_initial_mark_gc() should not be already set. We 1045 // will set it here if we have to. However, it should be cleared by 1046 // the end of the pause (it's only set for the duration of an 1047 // initial-mark pause). 1048 assert(!collector_state()->in_initial_mark_gc(), "pre-condition"); 1049 1050 if (collector_state()->initiate_conc_mark_if_possible()) { 1051 // We had noticed on a previous pause that the heap occupancy has 1052 // gone over the initiating threshold and we should start a 1053 // concurrent marking cycle. Or we've been explicitly requested 1054 // to start a concurrent marking cycle. Either way, we initiate 1055 // one if not inhibited for some reason. 1056 1057 GCCause::Cause cause = _g1h->gc_cause(); 1058 if ((cause != GCCause::_wb_breakpoint) && 1059 ConcurrentGCBreakpoints::is_controlled()) { 1060 log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)"); 1061 } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) { 1062 // Initiate a new initial mark if there is no marking or reclamation going on. 1063 initiate_conc_mark(); 1064 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); 1065 } else if (_g1h->is_user_requested_concurrent_full_gc(cause) || 1066 (cause == GCCause::_wb_breakpoint)) { 1067 // Initiate a user requested initial mark or run_to a breakpoint. 1068 // An initial mark must be young only GC, so the collector state 1069 // must be updated to reflect this. 1070 collector_state()->set_in_young_only_phase(true); 1071 collector_state()->set_in_young_gc_before_mixed(false); 1072 1073 // We might have ended up coming here about to start a mixed phase with a collection set 1074 // active. The following remark might change the change the "evacuation efficiency" of 1075 // the regions in this set, leading to failing asserts later. 1076 // Since the concurrent cycle will recreate the collection set anyway, simply drop it here. 1077 clear_collection_set_candidates(); 1078 abort_time_to_mixed_tracking(); 1079 initiate_conc_mark(); 1080 log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)", 1081 (cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user"); 1082 } else { 1083 // The concurrent marking thread is still finishing up the 1084 // previous cycle. If we start one right now the two cycles 1085 // overlap. In particular, the concurrent marking thread might 1086 // be in the process of clearing the next marking bitmap (which 1087 // we will use for the next cycle if we start one). Starting a 1088 // cycle now will be bad given that parts of the marking 1089 // information might get cleared by the marking thread. And we 1090 // cannot wait for the marking thread to finish the cycle as it 1091 // periodically yields while clearing the next marking bitmap 1092 // and, if it's in a yield point, it's waiting for us to 1093 // finish. So, at this point we will not start a cycle and we'll 1094 // let the concurrent marking thread complete the last one. 1095 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)"); 1096 } 1097 } 1098 } 1099 1100 void G1Policy::record_concurrent_mark_cleanup_end() { 1101 G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions()); 1102 _collection_set->set_candidates(candidates); 1103 1104 bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs"); 1105 if (!mixed_gc_pending) { 1106 clear_collection_set_candidates(); 1107 abort_time_to_mixed_tracking(); 1108 } 1109 collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending); 1110 collector_state()->set_mark_or_rebuild_in_progress(false); 1111 1112 double end_sec = os::elapsedTime(); 1113 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1114 _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms); 1115 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms); 1116 1117 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); 1118 } 1119 1120 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const { 1121 return percent_of(reclaimable_bytes, _g1h->capacity()); 1122 } 1123 1124 class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure { 1125 virtual bool do_heap_region(HeapRegion* r) { 1126 r->rem_set()->clear_locked(true /* only_cardset */); 1127 return false; 1128 } 1129 }; 1130 1131 void G1Policy::clear_collection_set_candidates() { 1132 // Clear remembered sets of remaining candidate regions and the actual candidate 1133 // set. 1134 G1ClearCollectionSetCandidateRemSets cl; 1135 _collection_set->candidates()->iterate(&cl); 1136 _collection_set->clear_candidates(); 1137 } 1138 1139 void G1Policy::maybe_start_marking() { 1140 if (need_to_start_conc_mark("end of GC")) { 1141 // Note: this might have already been set, if during the last 1142 // pause we decided to start a cycle but at the beginning of 1143 // this pause we decided to postpone it. That's OK. 1144 collector_state()->set_initiate_conc_mark_if_possible(true); 1145 } 1146 } 1147 1148 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const { 1149 assert(!collector_state()->in_full_gc(), "must be"); 1150 if (collector_state()->in_initial_mark_gc()) { 1151 assert(!collector_state()->in_young_gc_before_mixed(), "must be"); 1152 return InitialMarkGC; 1153 } else if (collector_state()->in_young_gc_before_mixed()) { 1154 assert(!collector_state()->in_initial_mark_gc(), "must be"); 1155 return LastYoungGC; 1156 } else if (collector_state()->in_mixed_phase()) { 1157 assert(!collector_state()->in_initial_mark_gc(), "must be"); 1158 assert(!collector_state()->in_young_gc_before_mixed(), "must be"); 1159 return MixedGC; 1160 } else { 1161 assert(!collector_state()->in_initial_mark_gc(), "must be"); 1162 assert(!collector_state()->in_young_gc_before_mixed(), "must be"); 1163 return YoungOnlyGC; 1164 } 1165 } 1166 1167 void G1Policy::record_pause(PauseKind kind, double start, double end) { 1168 // Manage the MMU tracker. For some reason it ignores Full GCs. 1169 if (kind != FullGC) { 1170 _mmu_tracker->add_pause(start, end); 1171 } 1172 // Manage the mutator time tracking from initial mark to first mixed gc. 1173 switch (kind) { 1174 case FullGC: 1175 abort_time_to_mixed_tracking(); 1176 break; 1177 case Cleanup: 1178 case Remark: 1179 case YoungOnlyGC: 1180 case LastYoungGC: 1181 _initial_mark_to_mixed.add_pause(end - start); 1182 break; 1183 case InitialMarkGC: 1184 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { 1185 _initial_mark_to_mixed.record_initial_mark_end(end); 1186 } 1187 break; 1188 case MixedGC: 1189 _initial_mark_to_mixed.record_mixed_gc_start(start); 1190 break; 1191 default: 1192 ShouldNotReachHere(); 1193 } 1194 } 1195 1196 void G1Policy::abort_time_to_mixed_tracking() { 1197 _initial_mark_to_mixed.reset(); 1198 } 1199 1200 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str, 1201 const char* false_action_str) const { 1202 G1CollectionSetCandidates* candidates = _collection_set->candidates(); 1203 1204 if (candidates->is_empty()) { 1205 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); 1206 return false; 1207 } 1208 1209 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 1210 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes(); 1211 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes); 1212 double threshold = (double) G1HeapWastePercent; 1213 if (reclaimable_percent <= threshold) { 1214 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1215 false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); 1216 return false; 1217 } 1218 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, 1219 true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); 1220 return true; 1221 } 1222 1223 uint G1Policy::calc_min_old_cset_length() const { 1224 // The min old CSet region bound is based on the maximum desired 1225 // number of mixed GCs after a cycle. I.e., even if some old regions 1226 // look expensive, we should add them to the CSet anyway to make 1227 // sure we go through the available old regions in no more than the 1228 // maximum desired number of mixed GCs. 1229 // 1230 // The calculation is based on the number of marked regions we added 1231 // to the CSet candidates in the first place, not how many remain, so 1232 // that the result is the same during all mixed GCs that follow a cycle. 1233 1234 const size_t region_num = _collection_set->candidates()->num_regions(); 1235 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 1236 size_t result = region_num / gc_num; 1237 // emulate ceiling 1238 if (result * gc_num < region_num) { 1239 result += 1; 1240 } 1241 return (uint) result; 1242 } 1243 1244 uint G1Policy::calc_max_old_cset_length() const { 1245 // The max old CSet region bound is based on the threshold expressed 1246 // as a percentage of the heap size. I.e., it should bound the 1247 // number of old regions added to the CSet irrespective of how many 1248 // of them are available. 1249 1250 const G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1251 const size_t region_num = g1h->num_regions(); 1252 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 1253 size_t result = region_num * perc / 100; 1254 // emulate ceiling 1255 if (100 * result < region_num * perc) { 1256 result += 1; 1257 } 1258 return (uint) result; 1259 } 1260 1261 void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates, 1262 double time_remaining_ms, 1263 uint& num_initial_regions, 1264 uint& num_optional_regions) { 1265 assert(candidates != NULL, "Must be"); 1266 1267 num_initial_regions = 0; 1268 num_optional_regions = 0; 1269 uint num_expensive_regions = 0; 1270 1271 double predicted_old_time_ms = 0.0; 1272 double predicted_initial_time_ms = 0.0; 1273 double predicted_optional_time_ms = 0.0; 1274 1275 double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction(); 1276 1277 const uint min_old_cset_length = calc_min_old_cset_length(); 1278 const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length()); 1279 const uint max_optional_regions = max_old_cset_length - min_old_cset_length; 1280 bool check_time_remaining = use_adaptive_young_list_length(); 1281 1282 uint candidate_idx = candidates->cur_idx(); 1283 1284 log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, " 1285 "time remaining %1.2fms, optional threshold %1.2fms", 1286 min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms); 1287 1288 HeapRegion* hr = candidates->at(candidate_idx); 1289 while (hr != NULL) { 1290 if (num_initial_regions + num_optional_regions >= max_old_cset_length) { 1291 // Added maximum number of old regions to the CSet. 1292 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). " 1293 "Initial %u regions, optional %u regions", 1294 num_initial_regions, num_optional_regions); 1295 break; 1296 } 1297 1298 // Stop adding regions if the remaining reclaimable space is 1299 // not above G1HeapWastePercent. 1300 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes(); 1301 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes); 1302 double threshold = (double) G1HeapWastePercent; 1303 if (reclaimable_percent <= threshold) { 1304 // We've added enough old regions that the amount of uncollected 1305 // reclaimable space is at or below the waste threshold. Stop 1306 // adding old regions to the CSet. 1307 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). " 1308 "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%", 1309 byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes), 1310 reclaimable_percent, G1HeapWastePercent); 1311 break; 1312 } 1313 1314 double predicted_time_ms = predict_region_total_time_ms(hr, false); 1315 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); 1316 // Add regions to old set until we reach the minimum amount 1317 if (num_initial_regions < min_old_cset_length) { 1318 predicted_old_time_ms += predicted_time_ms; 1319 num_initial_regions++; 1320 // Record the number of regions added with no time remaining 1321 if (time_remaining_ms == 0.0) { 1322 num_expensive_regions++; 1323 } 1324 } else if (!check_time_remaining) { 1325 // In the non-auto-tuning case, we'll finish adding regions 1326 // to the CSet if we reach the minimum. 1327 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min)."); 1328 break; 1329 } else { 1330 // Keep adding regions to old set until we reach the optional threshold 1331 if (time_remaining_ms > optional_threshold_ms) { 1332 predicted_old_time_ms += predicted_time_ms; 1333 num_initial_regions++; 1334 } else if (time_remaining_ms > 0) { 1335 // Keep adding optional regions until time is up. 1336 assert(num_optional_regions < max_optional_regions, "Should not be possible."); 1337 predicted_optional_time_ms += predicted_time_ms; 1338 num_optional_regions++; 1339 } else { 1340 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high)."); 1341 break; 1342 } 1343 } 1344 hr = candidates->at(++candidate_idx); 1345 } 1346 if (hr == NULL) { 1347 log_debug(gc, ergo, cset)("Old candidate collection set empty."); 1348 } 1349 1350 if (num_expensive_regions > 0) { 1351 log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.", 1352 num_expensive_regions); 1353 } 1354 1355 log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, " 1356 "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f", 1357 num_initial_regions, num_optional_regions, 1358 predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms); 1359 } 1360 1361 void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates, 1362 uint const max_optional_regions, 1363 double time_remaining_ms, 1364 uint& num_optional_regions) { 1365 assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase"); 1366 1367 num_optional_regions = 0; 1368 double prediction_ms = 0; 1369 uint candidate_idx = candidates->cur_idx(); 1370 1371 HeapRegion* r = candidates->at(candidate_idx); 1372 while (num_optional_regions < max_optional_regions) { 1373 assert(r != NULL, "Region must exist"); 1374 prediction_ms += predict_region_total_time_ms(r, false); 1375 1376 if (prediction_ms > time_remaining_ms) { 1377 log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.", 1378 prediction_ms, r->hrm_index(), time_remaining_ms); 1379 break; 1380 } 1381 // This region will be included in the next optional evacuation. 1382 1383 time_remaining_ms -= prediction_ms; 1384 num_optional_regions++; 1385 r = candidates->at(++candidate_idx); 1386 } 1387 1388 log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms", 1389 num_optional_regions, max_optional_regions, prediction_ms); 1390 } 1391 1392 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) { 1393 note_start_adding_survivor_regions(); 1394 1395 HeapRegion* last = NULL; 1396 for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin(); 1397 it != survivors->regions()->end(); 1398 ++it) { 1399 HeapRegion* curr = *it; 1400 set_region_survivor(curr); 1401 1402 // The region is a non-empty survivor so let's add it to 1403 // the incremental collection set for the next evacuation 1404 // pause. 1405 _collection_set->add_survivor_regions(curr); 1406 1407 last = curr; 1408 } 1409 note_stop_adding_survivor_regions(); 1410 1411 // Don't clear the survivor list handles until the start of 1412 // the next evacuation pause - we need it in order to re-tag 1413 // the survivor regions from this evacuation pause as 'young' 1414 // at the start of the next. 1415 }