1 /* 2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef __clang_major__ 26 // FIXME, formats have issues. Disable this macro definition, compile, and study warnings for more information. 27 #define ATTRIBUTE_PRINTF(x,y) 28 #endif 29 30 #include "precompiled.hpp" 31 #include "gc_implementation/g1/concurrentG1Refine.hpp" 32 #include "gc_implementation/g1/concurrentMark.hpp" 33 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" 34 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 35 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 36 #include "gc_implementation/g1/g1ErgoVerbose.hpp" 37 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" 38 #include "gc_implementation/g1/g1Log.hpp" 39 #include "gc_implementation/g1/heapRegionRemSet.hpp" 40 #include "gc_implementation/shared/gcPolicyCounters.hpp" 41 #include "runtime/arguments.hpp" 42 #include "runtime/java.hpp" 43 #include "runtime/mutexLocker.hpp" 44 #include "utilities/debug.hpp" 45 46 // Different defaults for different number of GC threads 47 // They were chosen by running GCOld and SPECjbb on debris with different 48 // numbers of GC threads and choosing them based on the results 49 50 // all the same 51 static double rs_length_diff_defaults[] = { 52 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 53 }; 54 55 static double cost_per_card_ms_defaults[] = { 56 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 57 }; 58 59 // all the same 60 static double young_cards_per_entry_ratio_defaults[] = { 61 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 62 }; 63 64 static double cost_per_entry_ms_defaults[] = { 65 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 66 }; 67 68 static double cost_per_byte_ms_defaults[] = { 69 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 70 }; 71 72 // these should be pretty consistent 73 static double constant_other_time_ms_defaults[] = { 74 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 75 }; 76 77 78 static double young_other_cost_per_region_ms_defaults[] = { 79 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 80 }; 81 82 static double non_young_other_cost_per_region_ms_defaults[] = { 83 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 84 }; 85 86 G1CollectorPolicy::G1CollectorPolicy() : 87 _parallel_gc_threads(ParallelGCThreads), 88 89 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 90 _stop_world_start(0.0), 91 92 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 93 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 94 95 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 96 _prev_collection_pause_end_ms(0.0), 97 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 98 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 99 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 100 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 101 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 102 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 103 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 104 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), 105 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 106 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 107 _non_young_other_cost_per_region_ms_seq( 108 new TruncatedSeq(TruncatedSeqLength)), 109 110 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 111 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), 112 113 _pause_time_target_ms((double) MaxGCPauseMillis), 114 115 _gcs_are_young(true), 116 117 _during_marking(false), 118 _in_marking_window(false), 119 _in_marking_window_im(false), 120 121 _recent_prev_end_times_for_all_gcs_sec( 122 new TruncatedSeq(NumPrevPausesForHeuristics)), 123 124 _recent_avg_pause_time_ratio(0.0), 125 126 _initiate_conc_mark_if_possible(false), 127 _during_initial_mark_pause(false), 128 _last_young_gc(false), 129 _last_gc_was_young(false), 130 131 _eden_used_bytes_before_gc(0), 132 _survivor_used_bytes_before_gc(0), 133 _heap_used_bytes_before_gc(0), 134 _metaspace_used_bytes_before_gc(0), 135 _eden_capacity_bytes_before_gc(0), 136 _heap_capacity_bytes_before_gc(0), 137 138 _eden_cset_region_length(0), 139 _survivor_cset_region_length(0), 140 _old_cset_region_length(0), 141 142 _collection_set(NULL), 143 _collection_set_bytes_used_before(0), 144 145 // Incremental CSet attributes 146 _inc_cset_build_state(Inactive), 147 _inc_cset_head(NULL), 148 _inc_cset_tail(NULL), 149 _inc_cset_bytes_used_before(0), 150 _inc_cset_max_finger(NULL), 151 _inc_cset_recorded_rs_lengths(0), 152 _inc_cset_recorded_rs_lengths_diffs(0), 153 _inc_cset_predicted_elapsed_time_ms(0.0), 154 _inc_cset_predicted_elapsed_time_ms_diffs(0.0), 155 156 // add here any more surv rate groups 157 _recorded_survivor_regions(0), 158 _recorded_survivor_head(NULL), 159 _recorded_survivor_tail(NULL), 160 _survivors_age_table(true), 161 162 _gc_overhead_perc(0.0) { 163 164 uintx confidence_perc = G1ConfidencePercent; 165 // Put an artificial ceiling on this so that it's not set to a silly value. 166 if (confidence_perc > 100) { 167 confidence_perc = 100; 168 warning("G1ConfidencePercent is set to a value that is too large, " 169 "it's been updated to %u", confidence_perc); 170 } 171 // '_sigma' must be initialized before the SurvRateGroups below because they 172 // indirecty access '_sigma' trough the 'this' pointer in their constructor. 173 _sigma = (double) confidence_perc / 100.0; 174 175 _short_lived_surv_rate_group = 176 new SurvRateGroup(this, "Short Lived", G1YoungSurvRateNumRegionsSummary); 177 _survivor_surv_rate_group = 178 new SurvRateGroup(this, "Survivor", G1YoungSurvRateNumRegionsSummary); 179 180 // Set up the region size and associated fields. Given that the 181 // policy is created before the heap, we have to set this up here, 182 // so it's done as soon as possible. 183 184 // It would have been natural to pass initial_heap_byte_size() and 185 // max_heap_byte_size() to setup_heap_region_size() but those have 186 // not been set up at this point since they should be aligned with 187 // the region size. So, there is a circular dependency here. We base 188 // the region size on the heap size, but the heap size should be 189 // aligned with the region size. To get around this we use the 190 // unaligned values for the heap. 191 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); 192 HeapRegionRemSet::setup_remset_size(); 193 194 G1ErgoVerbose::initialize(); 195 if (PrintAdaptiveSizePolicy) { 196 // Currently, we only use a single switch for all the heuristics. 197 G1ErgoVerbose::set_enabled(true); 198 // Given that we don't currently have a verboseness level 199 // parameter, we'll hardcode this to high. This can be easily 200 // changed in the future. 201 G1ErgoVerbose::set_level(ErgoHigh); 202 } else { 203 G1ErgoVerbose::set_enabled(false); 204 } 205 206 // Verify PLAB sizes 207 const size_t region_size = HeapRegion::GrainWords; 208 if (YoungPLABSize > region_size || OldPLABSize > region_size) { 209 char buffer[128]; 210 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT, 211 OldPLABSize > region_size ? "Old" : "Young", region_size); 212 vm_exit_during_initialization(buffer); 213 } 214 215 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 216 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 217 218 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads); 219 220 int index = MIN2(_parallel_gc_threads - 1, 7); 221 222 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 223 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 224 _young_cards_per_entry_ratio_seq->add( 225 young_cards_per_entry_ratio_defaults[index]); 226 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 227 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 228 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 229 _young_other_cost_per_region_ms_seq->add( 230 young_other_cost_per_region_ms_defaults[index]); 231 _non_young_other_cost_per_region_ms_seq->add( 232 non_young_other_cost_per_region_ms_defaults[index]); 233 234 // Below, we might need to calculate the pause time target based on 235 // the pause interval. When we do so we are going to give G1 maximum 236 // flexibility and allow it to do pauses when it needs to. So, we'll 237 // arrange that the pause interval to be pause time target + 1 to 238 // ensure that a) the pause time target is maximized with respect to 239 // the pause interval and b) we maintain the invariant that pause 240 // time target < pause interval. If the user does not want this 241 // maximum flexibility, they will have to set the pause interval 242 // explicitly. 243 244 // First make sure that, if either parameter is set, its value is 245 // reasonable. 246 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 247 if (MaxGCPauseMillis < 1) { 248 vm_exit_during_initialization("MaxGCPauseMillis should be " 249 "greater than 0"); 250 } 251 } 252 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 253 if (GCPauseIntervalMillis < 1) { 254 vm_exit_during_initialization("GCPauseIntervalMillis should be " 255 "greater than 0"); 256 } 257 } 258 259 // Then, if the pause time target parameter was not set, set it to 260 // the default value. 261 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 262 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 263 // The default pause time target in G1 is 200ms 264 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 265 } else { 266 // We do not allow the pause interval to be set without the 267 // pause time target 268 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " 269 "without setting MaxGCPauseMillis"); 270 } 271 } 272 273 // Then, if the interval parameter was not set, set it according to 274 // the pause time target (this will also deal with the case when the 275 // pause time target is the default value). 276 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 277 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); 278 } 279 280 // Finally, make sure that the two parameters are consistent. 281 if (MaxGCPauseMillis >= GCPauseIntervalMillis) { 282 char buffer[256]; 283 jio_snprintf(buffer, 256, 284 "MaxGCPauseMillis (%u) should be less than " 285 "GCPauseIntervalMillis (%u)", 286 MaxGCPauseMillis, GCPauseIntervalMillis); 287 vm_exit_during_initialization(buffer); 288 } 289 290 double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 291 double time_slice = (double) GCPauseIntervalMillis / 1000.0; 292 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); 293 294 // start conservatively (around 50ms is about right) 295 _concurrent_mark_remark_times_ms->add(0.05); 296 _concurrent_mark_cleanup_times_ms->add(0.20); 297 _tenuring_threshold = MaxTenuringThreshold; 298 // _max_survivor_regions will be calculated by 299 // update_young_list_target_length() during initialization. 300 _max_survivor_regions = 0; 301 302 assert(GCTimeRatio > 0, 303 "we should have set it to a default value set_g1_gc_flags() " 304 "if a user set it to 0"); 305 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); 306 307 uintx reserve_perc = G1ReservePercent; 308 // Put an artificial ceiling on this so that it's not set to a silly value. 309 if (reserve_perc > 50) { 310 reserve_perc = 50; 311 warning("G1ReservePercent is set to a value that is too large, " 312 "it's been updated to %u", reserve_perc); 313 } 314 _reserve_factor = (double) reserve_perc / 100.0; 315 // This will be set when the heap is expanded 316 // for the first time during initialization. 317 _reserve_regions = 0; 318 319 _collectionSetChooser = new CollectionSetChooser(); 320 } 321 322 void G1CollectorPolicy::initialize_alignments() { 323 _space_alignment = HeapRegion::GrainBytes; 324 size_t card_table_alignment = GenRemSet::max_alignment_constraint(); 325 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); 326 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); 327 } 328 329 void G1CollectorPolicy::initialize_flags() { 330 if (G1HeapRegionSize != HeapRegion::GrainBytes) { 331 FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes); 332 } 333 334 if (SurvivorRatio < 1) { 335 vm_exit_during_initialization("Invalid survivor ratio specified"); 336 } 337 CollectorPolicy::initialize_flags(); 338 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags 339 } 340 341 void G1CollectorPolicy::post_heap_initialize() { 342 uintx max_regions = G1CollectedHeap::heap()->max_regions(); 343 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; 344 if (max_young_size != MaxNewSize) { 345 FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size); 346 } 347 } 348 349 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true), 350 _min_desired_young_length(0), _max_desired_young_length(0) { 351 if (FLAG_IS_CMDLINE(NewRatio)) { 352 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { 353 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); 354 } else { 355 _sizer_kind = SizerNewRatio; 356 _adaptive_size = false; 357 return; 358 } 359 } 360 361 if (NewSize > MaxNewSize) { 362 if (FLAG_IS_CMDLINE(MaxNewSize)) { 363 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). " 364 "A new max generation size of " SIZE_FORMAT "k will be used.", 365 NewSize/K, MaxNewSize/K, NewSize/K); 366 } 367 MaxNewSize = NewSize; 368 } 369 370 if (FLAG_IS_CMDLINE(NewSize)) { 371 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), 372 1U); 373 if (FLAG_IS_CMDLINE(MaxNewSize)) { 374 _max_desired_young_length = 375 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 376 1U); 377 _sizer_kind = SizerMaxAndNewSize; 378 _adaptive_size = _min_desired_young_length == _max_desired_young_length; 379 } else { 380 _sizer_kind = SizerNewSizeOnly; 381 } 382 } else if (FLAG_IS_CMDLINE(MaxNewSize)) { 383 _max_desired_young_length = 384 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 385 1U); 386 _sizer_kind = SizerMaxNewSizeOnly; 387 } 388 } 389 390 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { 391 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100; 392 return MAX2(1U, default_value); 393 } 394 395 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { 396 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100; 397 return MAX2(1U, default_value); 398 } 399 400 void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { 401 assert(number_of_heap_regions > 0, "Heap must be initialized"); 402 403 switch (_sizer_kind) { 404 case SizerDefaults: 405 *min_young_length = calculate_default_min_length(number_of_heap_regions); 406 *max_young_length = calculate_default_max_length(number_of_heap_regions); 407 break; 408 case SizerNewSizeOnly: 409 *max_young_length = calculate_default_max_length(number_of_heap_regions); 410 *max_young_length = MAX2(*min_young_length, *max_young_length); 411 break; 412 case SizerMaxNewSizeOnly: 413 *min_young_length = calculate_default_min_length(number_of_heap_regions); 414 *min_young_length = MIN2(*min_young_length, *max_young_length); 415 break; 416 case SizerMaxAndNewSize: 417 // Do nothing. Values set on the command line, don't update them at runtime. 418 break; 419 case SizerNewRatio: 420 *min_young_length = number_of_heap_regions / (NewRatio + 1); 421 *max_young_length = *min_young_length; 422 break; 423 default: 424 ShouldNotReachHere(); 425 } 426 427 assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); 428 } 429 430 uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { 431 // We need to pass the desired values because recalculation may not update these 432 // values in some cases. 433 uint temp = _min_desired_young_length; 434 uint result = _max_desired_young_length; 435 recalculate_min_max_young_length(number_of_heap_regions, &temp, &result); 436 return result; 437 } 438 439 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { 440 recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length, 441 &_max_desired_young_length); 442 } 443 444 void G1CollectorPolicy::init() { 445 // Set aside an initial future to_space. 446 _g1 = G1CollectedHeap::heap(); 447 448 assert(Heap_lock->owned_by_self(), "Locking discipline."); 449 450 initialize_gc_policy_counters(); 451 452 if (adaptive_young_list_length()) { 453 _young_list_fixed_length = 0; 454 } else { 455 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); 456 } 457 _free_regions_at_end_of_collection = _g1->num_free_regions(); 458 update_young_list_target_length(); 459 460 // We may immediately start allocating regions and placing them on the 461 // collection set list. Initialize the per-collection set info 462 start_incremental_cset_building(); 463 } 464 465 // Create the jstat counters for the policy. 466 void G1CollectorPolicy::initialize_gc_policy_counters() { 467 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); 468 } 469 470 bool G1CollectorPolicy::predict_will_fit(uint young_length, 471 double base_time_ms, 472 uint base_free_regions, 473 double target_pause_time_ms) { 474 if (young_length >= base_free_regions) { 475 // end condition 1: not enough space for the young regions 476 return false; 477 } 478 479 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); 480 size_t bytes_to_copy = 481 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); 482 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); 483 double young_other_time_ms = predict_young_other_time_ms(young_length); 484 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; 485 if (pause_time_ms > target_pause_time_ms) { 486 // end condition 2: prediction is over the target pause time 487 return false; 488 } 489 490 size_t free_bytes = 491 (base_free_regions - young_length) * HeapRegion::GrainBytes; 492 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) { 493 // end condition 3: out-of-space (conservatively!) 494 return false; 495 } 496 497 // success! 498 return true; 499 } 500 501 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { 502 // re-calculate the necessary reserve 503 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; 504 // We use ceiling so that if reserve_regions_d is > 0.0 (but 505 // smaller than 1.0) we'll get 1. 506 _reserve_regions = (uint) ceil(reserve_regions_d); 507 508 _young_gen_sizer->heap_size_changed(new_number_of_regions); 509 } 510 511 uint G1CollectorPolicy::calculate_young_list_desired_min_length( 512 uint base_min_length) { 513 uint desired_min_length = 0; 514 if (adaptive_young_list_length()) { 515 if (_alloc_rate_ms_seq->num() > 3) { 516 double now_sec = os::elapsedTime(); 517 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 518 double alloc_rate_ms = predict_alloc_rate_ms(); 519 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); 520 } else { 521 // otherwise we don't have enough info to make the prediction 522 } 523 } 524 desired_min_length += base_min_length; 525 // make sure we don't go below any user-defined minimum bound 526 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); 527 } 528 529 uint G1CollectorPolicy::calculate_young_list_desired_max_length() { 530 // Here, we might want to also take into account any additional 531 // constraints (i.e., user-defined minimum bound). Currently, we 532 // effectively don't set this bound. 533 return _young_gen_sizer->max_desired_young_length(); 534 } 535 536 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { 537 if (rs_lengths == (size_t) -1) { 538 // if it's set to the default value (-1), we should predict it; 539 // otherwise, use the given value. 540 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq); 541 } 542 543 // Calculate the absolute and desired min bounds. 544 545 // This is how many young regions we already have (currently: the survivors). 546 uint base_min_length = recorded_survivor_regions(); 547 // This is the absolute minimum young length, which ensures that we 548 // can allocate one eden region in the worst-case. 549 uint absolute_min_length = base_min_length + 1; 550 uint desired_min_length = 551 calculate_young_list_desired_min_length(base_min_length); 552 if (desired_min_length < absolute_min_length) { 553 desired_min_length = absolute_min_length; 554 } 555 556 // Calculate the absolute and desired max bounds. 557 558 // We will try our best not to "eat" into the reserve. 559 uint absolute_max_length = 0; 560 if (_free_regions_at_end_of_collection > _reserve_regions) { 561 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; 562 } 563 uint desired_max_length = calculate_young_list_desired_max_length(); 564 if (desired_max_length > absolute_max_length) { 565 desired_max_length = absolute_max_length; 566 } 567 568 uint young_list_target_length = 0; 569 if (adaptive_young_list_length()) { 570 if (gcs_are_young()) { 571 young_list_target_length = 572 calculate_young_list_target_length(rs_lengths, 573 base_min_length, 574 desired_min_length, 575 desired_max_length); 576 _rs_lengths_prediction = rs_lengths; 577 } else { 578 // Don't calculate anything and let the code below bound it to 579 // the desired_min_length, i.e., do the next GC as soon as 580 // possible to maximize how many old regions we can add to it. 581 } 582 } else { 583 // The user asked for a fixed young gen so we'll fix the young gen 584 // whether the next GC is young or mixed. 585 young_list_target_length = _young_list_fixed_length; 586 } 587 588 // Make sure we don't go over the desired max length, nor under the 589 // desired min length. In case they clash, desired_min_length wins 590 // which is why that test is second. 591 if (young_list_target_length > desired_max_length) { 592 young_list_target_length = desired_max_length; 593 } 594 if (young_list_target_length < desired_min_length) { 595 young_list_target_length = desired_min_length; 596 } 597 598 assert(young_list_target_length > recorded_survivor_regions(), 599 "we should be able to allocate at least one eden region"); 600 assert(young_list_target_length >= absolute_min_length, "post-condition"); 601 _young_list_target_length = young_list_target_length; 602 603 update_max_gc_locker_expansion(); 604 } 605 606 uint 607 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, 608 uint base_min_length, 609 uint desired_min_length, 610 uint desired_max_length) { 611 assert(adaptive_young_list_length(), "pre-condition"); 612 assert(gcs_are_young(), "only call this for young GCs"); 613 614 // In case some edge-condition makes the desired max length too small... 615 if (desired_max_length <= desired_min_length) { 616 return desired_min_length; 617 } 618 619 // We'll adjust min_young_length and max_young_length not to include 620 // the already allocated young regions (i.e., so they reflect the 621 // min and max eden regions we'll allocate). The base_min_length 622 // will be reflected in the predictions by the 623 // survivor_regions_evac_time prediction. 624 assert(desired_min_length > base_min_length, "invariant"); 625 uint min_young_length = desired_min_length - base_min_length; 626 assert(desired_max_length > base_min_length, "invariant"); 627 uint max_young_length = desired_max_length - base_min_length; 628 629 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 630 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); 631 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); 632 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); 633 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); 634 double base_time_ms = 635 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + 636 survivor_regions_evac_time; 637 uint available_free_regions = _free_regions_at_end_of_collection; 638 uint base_free_regions = 0; 639 if (available_free_regions > _reserve_regions) { 640 base_free_regions = available_free_regions - _reserve_regions; 641 } 642 643 // Here, we will make sure that the shortest young length that 644 // makes sense fits within the target pause time. 645 646 if (predict_will_fit(min_young_length, base_time_ms, 647 base_free_regions, target_pause_time_ms)) { 648 // The shortest young length will fit into the target pause time; 649 // we'll now check whether the absolute maximum number of young 650 // regions will fit in the target pause time. If not, we'll do 651 // a binary search between min_young_length and max_young_length. 652 if (predict_will_fit(max_young_length, base_time_ms, 653 base_free_regions, target_pause_time_ms)) { 654 // The maximum young length will fit into the target pause time. 655 // We are done so set min young length to the maximum length (as 656 // the result is assumed to be returned in min_young_length). 657 min_young_length = max_young_length; 658 } else { 659 // The maximum possible number of young regions will not fit within 660 // the target pause time so we'll search for the optimal 661 // length. The loop invariants are: 662 // 663 // min_young_length < max_young_length 664 // min_young_length is known to fit into the target pause time 665 // max_young_length is known not to fit into the target pause time 666 // 667 // Going into the loop we know the above hold as we've just 668 // checked them. Every time around the loop we check whether 669 // the middle value between min_young_length and 670 // max_young_length fits into the target pause time. If it 671 // does, it becomes the new min. If it doesn't, it becomes 672 // the new max. This way we maintain the loop invariants. 673 674 assert(min_young_length < max_young_length, "invariant"); 675 uint diff = (max_young_length - min_young_length) / 2; 676 while (diff > 0) { 677 uint young_length = min_young_length + diff; 678 if (predict_will_fit(young_length, base_time_ms, 679 base_free_regions, target_pause_time_ms)) { 680 min_young_length = young_length; 681 } else { 682 max_young_length = young_length; 683 } 684 assert(min_young_length < max_young_length, "invariant"); 685 diff = (max_young_length - min_young_length) / 2; 686 } 687 // The results is min_young_length which, according to the 688 // loop invariants, should fit within the target pause time. 689 690 // These are the post-conditions of the binary search above: 691 assert(min_young_length < max_young_length, 692 "otherwise we should have discovered that max_young_length " 693 "fits into the pause target and not done the binary search"); 694 assert(predict_will_fit(min_young_length, base_time_ms, 695 base_free_regions, target_pause_time_ms), 696 "min_young_length, the result of the binary search, should " 697 "fit into the pause target"); 698 assert(!predict_will_fit(min_young_length + 1, base_time_ms, 699 base_free_regions, target_pause_time_ms), 700 "min_young_length, the result of the binary search, should be " 701 "optimal, so no larger length should fit into the pause target"); 702 } 703 } else { 704 // Even the minimum length doesn't fit into the pause time 705 // target, return it as the result nevertheless. 706 } 707 return base_min_length + min_young_length; 708 } 709 710 double G1CollectorPolicy::predict_survivor_regions_evac_time() { 711 double survivor_regions_evac_time = 0.0; 712 for (HeapRegion * r = _recorded_survivor_head; 713 r != NULL && r != _recorded_survivor_tail->get_next_young_region(); 714 r = r->get_next_young_region()) { 715 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young()); 716 } 717 return survivor_regions_evac_time; 718 } 719 720 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() { 721 guarantee( adaptive_young_list_length(), "should not call this otherwise" ); 722 723 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths(); 724 if (rs_lengths > _rs_lengths_prediction) { 725 // add 10% to avoid having to recalculate often 726 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; 727 update_young_list_target_length(rs_lengths_prediction); 728 } 729 } 730 731 732 733 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size, 734 bool is_tlab, 735 bool* gc_overhead_limit_was_exceeded) { 736 guarantee(false, "Not using this policy feature yet."); 737 return NULL; 738 } 739 740 // This method controls how a collector handles one or more 741 // of its generations being fully allocated. 742 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size, 743 bool is_tlab) { 744 guarantee(false, "Not using this policy feature yet."); 745 return NULL; 746 } 747 748 749 #ifndef PRODUCT 750 bool G1CollectorPolicy::verify_young_ages() { 751 HeapRegion* head = _g1->young_list()->first_region(); 752 return 753 verify_young_ages(head, _short_lived_surv_rate_group); 754 // also call verify_young_ages on any additional surv rate groups 755 } 756 757 bool 758 G1CollectorPolicy::verify_young_ages(HeapRegion* head, 759 SurvRateGroup *surv_rate_group) { 760 guarantee( surv_rate_group != NULL, "pre-condition" ); 761 762 const char* name = surv_rate_group->name(); 763 bool ret = true; 764 int prev_age = -1; 765 766 for (HeapRegion* curr = head; 767 curr != NULL; 768 curr = curr->get_next_young_region()) { 769 SurvRateGroup* group = curr->surv_rate_group(); 770 if (group == NULL && !curr->is_survivor()) { 771 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name); 772 ret = false; 773 } 774 775 if (surv_rate_group == group) { 776 int age = curr->age_in_surv_rate_group(); 777 778 if (age < 0) { 779 gclog_or_tty->print_cr("## %s: encountered negative age", name); 780 ret = false; 781 } 782 783 if (age <= prev_age) { 784 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing " 785 "(%d, %d)", name, age, prev_age); 786 ret = false; 787 } 788 prev_age = age; 789 } 790 } 791 792 return ret; 793 } 794 #endif // PRODUCT 795 796 void G1CollectorPolicy::record_full_collection_start() { 797 _full_collection_start_sec = os::elapsedTime(); 798 record_heap_size_info_at_start(true /* full */); 799 // Release the future to-space so that it is available for compaction into. 800 _g1->set_full_collection(); 801 } 802 803 void G1CollectorPolicy::record_full_collection_end() { 804 // Consider this like a collection pause for the purposes of allocation 805 // since last pause. 806 double end_sec = os::elapsedTime(); 807 double full_gc_time_sec = end_sec - _full_collection_start_sec; 808 double full_gc_time_ms = full_gc_time_sec * 1000.0; 809 810 _trace_old_gen_time_data.record_full_collection(full_gc_time_ms); 811 812 update_recent_gc_times(end_sec, full_gc_time_ms); 813 814 _g1->clear_full_collection(); 815 816 // "Nuke" the heuristics that control the young/mixed GC 817 // transitions and make sure we start with young GCs after the Full GC. 818 set_gcs_are_young(true); 819 _last_young_gc = false; 820 clear_initiate_conc_mark_if_possible(); 821 clear_during_initial_mark_pause(); 822 _in_marking_window = false; 823 _in_marking_window_im = false; 824 825 _short_lived_surv_rate_group->start_adding_regions(); 826 // also call this on any additional surv rate groups 827 828 record_survivor_regions(0, NULL, NULL); 829 830 _free_regions_at_end_of_collection = _g1->num_free_regions(); 831 // Reset survivors SurvRateGroup. 832 _survivor_surv_rate_group->reset(); 833 update_young_list_target_length(); 834 _collectionSetChooser->clear(); 835 } 836 837 void G1CollectorPolicy::record_stop_world_start() { 838 _stop_world_start = os::elapsedTime(); 839 } 840 841 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { 842 // We only need to do this here as the policy will only be applied 843 // to the GC we're about to start. so, no point is calculating this 844 // every time we calculate / recalculate the target young length. 845 update_survivors_policy(); 846 847 assert(_g1->used() == _g1->recalculate_used(), 848 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT, 849 _g1->used(), _g1->recalculate_used())); 850 851 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0; 852 _trace_young_gen_time_data.record_start_collection(s_w_t_ms); 853 _stop_world_start = 0.0; 854 855 record_heap_size_info_at_start(false /* full */); 856 857 phase_times()->record_cur_collection_start_sec(start_time_sec); 858 _pending_cards = _g1->pending_card_num(); 859 860 _collection_set_bytes_used_before = 0; 861 _bytes_copied_during_gc = 0; 862 863 _last_gc_was_young = false; 864 865 // do that for any other surv rate groups 866 _short_lived_surv_rate_group->stop_adding_regions(); 867 _survivors_age_table.clear(); 868 869 assert( verify_young_ages(), "region age verification" ); 870 } 871 872 void G1CollectorPolicy::record_concurrent_mark_init_end(double 873 mark_init_elapsed_time_ms) { 874 _during_marking = true; 875 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now"); 876 clear_during_initial_mark_pause(); 877 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms; 878 } 879 880 void G1CollectorPolicy::record_concurrent_mark_remark_start() { 881 _mark_remark_start_sec = os::elapsedTime(); 882 _during_marking = false; 883 } 884 885 void G1CollectorPolicy::record_concurrent_mark_remark_end() { 886 double end_time_sec = os::elapsedTime(); 887 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 888 _concurrent_mark_remark_times_ms->add(elapsed_time_ms); 889 _cur_mark_stop_world_time_ms += elapsed_time_ms; 890 _prev_collection_pause_end_ms += elapsed_time_ms; 891 892 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true); 893 } 894 895 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { 896 _mark_cleanup_start_sec = os::elapsedTime(); 897 } 898 899 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { 900 _last_young_gc = true; 901 _in_marking_window = false; 902 } 903 904 void G1CollectorPolicy::record_concurrent_pause() { 905 if (_stop_world_start > 0.0) { 906 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0; 907 _trace_young_gen_time_data.record_yield_time(yield_ms); 908 } 909 } 910 911 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { 912 if (_g1->concurrent_mark()->cmThread()->during_cycle()) { 913 return false; 914 } 915 916 size_t marking_initiating_used_threshold = 917 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent; 918 size_t cur_used_bytes = _g1->non_young_capacity_bytes(); 919 size_t alloc_byte_size = alloc_word_size * HeapWordSize; 920 921 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) { 922 if (gcs_are_young() && !_last_young_gc) { 923 ergo_verbose5(ErgoConcCycles, 924 "request concurrent cycle initiation", 925 ergo_format_reason("occupancy higher than threshold") 926 ergo_format_byte("occupancy") 927 ergo_format_byte("allocation request") 928 ergo_format_byte_perc("threshold") 929 ergo_format_str("source"), 930 cur_used_bytes, 931 alloc_byte_size, 932 marking_initiating_used_threshold, 933 (double) InitiatingHeapOccupancyPercent, 934 source); 935 return true; 936 } else { 937 ergo_verbose5(ErgoConcCycles, 938 "do not request concurrent cycle initiation", 939 ergo_format_reason("still doing mixed collections") 940 ergo_format_byte("occupancy") 941 ergo_format_byte("allocation request") 942 ergo_format_byte_perc("threshold") 943 ergo_format_str("source"), 944 cur_used_bytes, 945 alloc_byte_size, 946 marking_initiating_used_threshold, 947 (double) InitiatingHeapOccupancyPercent, 948 source); 949 } 950 } 951 952 return false; 953 } 954 955 // Anything below that is considered to be zero 956 #define MIN_TIMER_GRANULARITY 0.0000001 957 958 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) { 959 double end_time_sec = os::elapsedTime(); 960 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(), 961 "otherwise, the subtraction below does not make sense"); 962 size_t rs_size = 963 _cur_collection_pause_used_regions_at_start - cset_region_length(); 964 size_t cur_used_bytes = _g1->used(); 965 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 966 bool last_pause_included_initial_mark = false; 967 bool update_stats = !_g1->evacuation_failed(); 968 969 #ifndef PRODUCT 970 if (G1YoungSurvRateVerbose) { 971 gclog_or_tty->cr(); 972 _short_lived_surv_rate_group->print(); 973 // do that for any other surv rate groups too 974 } 975 #endif // PRODUCT 976 977 last_pause_included_initial_mark = during_initial_mark_pause(); 978 if (last_pause_included_initial_mark) { 979 record_concurrent_mark_init_end(0.0); 980 } else if (need_to_start_conc_mark("end of GC")) { 981 // Note: this might have already been set, if during the last 982 // pause we decided to start a cycle but at the beginning of 983 // this pause we decided to postpone it. That's OK. 984 set_initiate_conc_mark_if_possible(); 985 } 986 987 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, 988 end_time_sec, false); 989 990 evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before); 991 evacuation_info.set_bytes_copied(_bytes_copied_during_gc); 992 993 if (update_stats) { 994 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times()); 995 // this is where we update the allocation rate of the application 996 double app_time_ms = 997 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 998 if (app_time_ms < MIN_TIMER_GRANULARITY) { 999 // This usually happens due to the timer not having the required 1000 // granularity. Some Linuxes are the usual culprits. 1001 // We'll just set it to something (arbitrarily) small. 1002 app_time_ms = 1.0; 1003 } 1004 // We maintain the invariant that all objects allocated by mutator 1005 // threads will be allocated out of eden regions. So, we can use 1006 // the eden region number allocated since the previous GC to 1007 // calculate the application's allocate rate. The only exception 1008 // to that is humongous objects that are allocated separately. But 1009 // given that humongous object allocations do not really affect 1010 // either the pause's duration nor when the next pause will take 1011 // place we can safely ignore them here. 1012 uint regions_allocated = eden_cset_region_length(); 1013 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 1014 _alloc_rate_ms_seq->add(alloc_rate_ms); 1015 1016 double interval_ms = 1017 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 1018 update_recent_gc_times(end_time_sec, pause_time_ms); 1019 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 1020 if (recent_avg_pause_time_ratio() < 0.0 || 1021 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 1022 #ifndef PRODUCT 1023 // Dump info to allow post-facto debugging 1024 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds"); 1025 gclog_or_tty->print_cr("-------------------------------------------"); 1026 gclog_or_tty->print_cr("Recent GC Times (ms):"); 1027 _recent_gc_times_ms->dump(); 1028 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec); 1029 _recent_prev_end_times_for_all_gcs_sec->dump(); 1030 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f", 1031 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio()); 1032 // In debug mode, terminate the JVM if the user wants to debug at this point. 1033 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above"); 1034 #endif // !PRODUCT 1035 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 1036 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 1037 if (_recent_avg_pause_time_ratio < 0.0) { 1038 _recent_avg_pause_time_ratio = 0.0; 1039 } else { 1040 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 1041 _recent_avg_pause_time_ratio = 1.0; 1042 } 1043 } 1044 } 1045 1046 bool new_in_marking_window = _in_marking_window; 1047 bool new_in_marking_window_im = false; 1048 if (last_pause_included_initial_mark) { 1049 new_in_marking_window = true; 1050 new_in_marking_window_im = true; 1051 } 1052 1053 if (_last_young_gc) { 1054 // This is supposed to to be the "last young GC" before we start 1055 // doing mixed GCs. Here we decide whether to start mixed GCs or not. 1056 1057 if (!last_pause_included_initial_mark) { 1058 if (next_gc_should_be_mixed("start mixed GCs", 1059 "do not start mixed GCs")) { 1060 set_gcs_are_young(false); 1061 } 1062 } else { 1063 ergo_verbose0(ErgoMixedGCs, 1064 "do not start mixed GCs", 1065 ergo_format_reason("concurrent cycle is about to start")); 1066 } 1067 _last_young_gc = false; 1068 } 1069 1070 if (!_last_gc_was_young) { 1071 // This is a mixed GC. Here we decide whether to continue doing 1072 // mixed GCs or not. 1073 1074 if (!next_gc_should_be_mixed("continue mixed GCs", 1075 "do not continue mixed GCs")) { 1076 set_gcs_are_young(true); 1077 } 1078 } 1079 1080 _short_lived_surv_rate_group->start_adding_regions(); 1081 // Do that for any other surv rate groups 1082 1083 if (update_stats) { 1084 double cost_per_card_ms = 0.0; 1085 if (_pending_cards > 0) { 1086 cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards; 1087 _cost_per_card_ms_seq->add(cost_per_card_ms); 1088 } 1089 1090 size_t cards_scanned = _g1->cards_scanned(); 1091 1092 double cost_per_entry_ms = 0.0; 1093 if (cards_scanned > 10) { 1094 cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned; 1095 if (_last_gc_was_young) { 1096 _cost_per_entry_ms_seq->add(cost_per_entry_ms); 1097 } else { 1098 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); 1099 } 1100 } 1101 1102 if (_max_rs_lengths > 0) { 1103 double cards_per_entry_ratio = 1104 (double) cards_scanned / (double) _max_rs_lengths; 1105 if (_last_gc_was_young) { 1106 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1107 } else { 1108 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1109 } 1110 } 1111 1112 // This is defensive. For a while _max_rs_lengths could get 1113 // smaller than _recorded_rs_lengths which was causing 1114 // rs_length_diff to get very large and mess up the RSet length 1115 // predictions. The reason was unsafe concurrent updates to the 1116 // _inc_cset_recorded_rs_lengths field which the code below guards 1117 // against (see CR 7118202). This bug has now been fixed (see CR 1118 // 7119027). However, I'm still worried that 1119 // _inc_cset_recorded_rs_lengths might still end up somewhat 1120 // inaccurate. The concurrent refinement thread calculates an 1121 // RSet's length concurrently with other CR threads updating it 1122 // which might cause it to calculate the length incorrectly (if, 1123 // say, it's in mid-coarsening). So I'll leave in the defensive 1124 // conditional below just in case. 1125 size_t rs_length_diff = 0; 1126 if (_max_rs_lengths > _recorded_rs_lengths) { 1127 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths; 1128 } 1129 _rs_length_diff_seq->add((double) rs_length_diff); 1130 1131 size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes; 1132 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes; 1133 double cost_per_byte_ms = 0.0; 1134 1135 if (copied_bytes > 0) { 1136 cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes; 1137 if (_in_marking_window) { 1138 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); 1139 } else { 1140 _cost_per_byte_ms_seq->add(cost_per_byte_ms); 1141 } 1142 } 1143 1144 double all_other_time_ms = pause_time_ms - 1145 (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time() 1146 + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time()); 1147 1148 double young_other_time_ms = 0.0; 1149 if (young_cset_region_length() > 0) { 1150 young_other_time_ms = 1151 phase_times()->young_cset_choice_time_ms() + 1152 phase_times()->young_free_cset_time_ms(); 1153 _young_other_cost_per_region_ms_seq->add(young_other_time_ms / 1154 (double) young_cset_region_length()); 1155 } 1156 double non_young_other_time_ms = 0.0; 1157 if (old_cset_region_length() > 0) { 1158 non_young_other_time_ms = 1159 phase_times()->non_young_cset_choice_time_ms() + 1160 phase_times()->non_young_free_cset_time_ms(); 1161 1162 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms / 1163 (double) old_cset_region_length()); 1164 } 1165 1166 double constant_other_time_ms = all_other_time_ms - 1167 (young_other_time_ms + non_young_other_time_ms); 1168 _constant_other_time_ms_seq->add(constant_other_time_ms); 1169 1170 double survival_ratio = 0.0; 1171 if (_collection_set_bytes_used_before > 0) { 1172 survival_ratio = (double) _bytes_copied_during_gc / 1173 (double) _collection_set_bytes_used_before; 1174 } 1175 1176 _pending_cards_seq->add((double) _pending_cards); 1177 _rs_lengths_seq->add((double) _max_rs_lengths); 1178 } 1179 1180 _in_marking_window = new_in_marking_window; 1181 _in_marking_window_im = new_in_marking_window_im; 1182 _free_regions_at_end_of_collection = _g1->num_free_regions(); 1183 update_young_list_target_length(); 1184 1185 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 1186 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 1187 adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(), 1188 phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms); 1189 1190 _collectionSetChooser->verify(); 1191 } 1192 1193 #define EXT_SIZE_FORMAT "%.1f%s" 1194 #define EXT_SIZE_PARAMS(bytes) \ 1195 byte_size_in_proper_unit((double)(bytes)), \ 1196 proper_unit_for_byte_size((bytes)) 1197 1198 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) { 1199 YoungList* young_list = _g1->young_list(); 1200 _eden_used_bytes_before_gc = young_list->eden_used_bytes(); 1201 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes(); 1202 _heap_capacity_bytes_before_gc = _g1->capacity(); 1203 _heap_used_bytes_before_gc = _g1->used(); 1204 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions(); 1205 1206 _eden_capacity_bytes_before_gc = 1207 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc; 1208 1209 if (full) { 1210 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes(); 1211 } 1212 } 1213 1214 void G1CollectorPolicy::print_heap_transition() { 1215 _g1->print_size_transition(gclog_or_tty, 1216 _heap_used_bytes_before_gc, 1217 _g1->used(), 1218 _g1->capacity()); 1219 } 1220 1221 void G1CollectorPolicy::print_detailed_heap_transition(bool full) { 1222 YoungList* young_list = _g1->young_list(); 1223 1224 size_t eden_used_bytes_after_gc = young_list->eden_used_bytes(); 1225 size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes(); 1226 size_t heap_used_bytes_after_gc = _g1->used(); 1227 1228 size_t heap_capacity_bytes_after_gc = _g1->capacity(); 1229 size_t eden_capacity_bytes_after_gc = 1230 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc; 1231 1232 gclog_or_tty->print( 1233 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") " 1234 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " 1235 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->" 1236 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]", 1237 EXT_SIZE_PARAMS(_eden_used_bytes_before_gc), 1238 EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc), 1239 EXT_SIZE_PARAMS(eden_used_bytes_after_gc), 1240 EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc), 1241 EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc), 1242 EXT_SIZE_PARAMS(survivor_used_bytes_after_gc), 1243 EXT_SIZE_PARAMS(_heap_used_bytes_before_gc), 1244 EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc), 1245 EXT_SIZE_PARAMS(heap_used_bytes_after_gc), 1246 EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc)); 1247 1248 if (full) { 1249 MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc); 1250 } 1251 1252 gclog_or_tty->cr(); 1253 } 1254 1255 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1256 double update_rs_processed_buffers, 1257 double goal_ms) { 1258 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1259 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1260 1261 if (G1UseAdaptiveConcRefinement) { 1262 const int k_gy = 3, k_gr = 6; 1263 const double inc_k = 1.1, dec_k = 0.9; 1264 1265 int g = cg1r->green_zone(); 1266 if (update_rs_time > goal_ms) { 1267 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 1268 } else { 1269 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { 1270 g = (int)MAX2(g * inc_k, g + 1.0); 1271 } 1272 } 1273 // Change the refinement threads params 1274 cg1r->set_green_zone(g); 1275 cg1r->set_yellow_zone(g * k_gy); 1276 cg1r->set_red_zone(g * k_gr); 1277 cg1r->reinitialize_threads(); 1278 1279 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1); 1280 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, 1281 cg1r->yellow_zone()); 1282 // Change the barrier params 1283 dcqs.set_process_completed_threshold(processing_threshold); 1284 dcqs.set_max_completed_queue(cg1r->red_zone()); 1285 } 1286 1287 int curr_queue_size = dcqs.completed_buffers_num(); 1288 if (curr_queue_size >= cg1r->yellow_zone()) { 1289 dcqs.set_completed_queue_padding(curr_queue_size); 1290 } else { 1291 dcqs.set_completed_queue_padding(0); 1292 } 1293 dcqs.notify_if_necessary(); 1294 } 1295 1296 double 1297 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1298 size_t scanned_cards) { 1299 return 1300 predict_rs_update_time_ms(pending_cards) + 1301 predict_rs_scan_time_ms(scanned_cards) + 1302 predict_constant_other_time_ms(); 1303 } 1304 1305 double 1306 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) { 1307 size_t rs_length = predict_rs_length_diff(); 1308 size_t card_num; 1309 if (gcs_are_young()) { 1310 card_num = predict_young_card_num(rs_length); 1311 } else { 1312 card_num = predict_non_young_card_num(rs_length); 1313 } 1314 return predict_base_elapsed_time_ms(pending_cards, card_num); 1315 } 1316 1317 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { 1318 size_t bytes_to_copy; 1319 if (hr->is_marked()) 1320 bytes_to_copy = hr->max_live_bytes(); 1321 else { 1322 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); 1323 int age = hr->age_in_surv_rate_group(); 1324 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1325 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate); 1326 } 1327 return bytes_to_copy; 1328 } 1329 1330 double 1331 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1332 bool for_young_gc) { 1333 size_t rs_length = hr->rem_set()->occupied(); 1334 size_t card_num; 1335 1336 // Predicting the number of cards is based on which type of GC 1337 // we're predicting for. 1338 if (for_young_gc) { 1339 card_num = predict_young_card_num(rs_length); 1340 } else { 1341 card_num = predict_non_young_card_num(rs_length); 1342 } 1343 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1344 1345 double region_elapsed_time_ms = 1346 predict_rs_scan_time_ms(card_num) + 1347 predict_object_copy_time_ms(bytes_to_copy); 1348 1349 // The prediction of the "other" time for this region is based 1350 // upon the region type and NOT the GC type. 1351 if (hr->is_young()) { 1352 region_elapsed_time_ms += predict_young_other_time_ms(1); 1353 } else { 1354 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1355 } 1356 return region_elapsed_time_ms; 1357 } 1358 1359 void 1360 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length, 1361 uint survivor_cset_region_length) { 1362 _eden_cset_region_length = eden_cset_region_length; 1363 _survivor_cset_region_length = survivor_cset_region_length; 1364 _old_cset_region_length = 0; 1365 } 1366 1367 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) { 1368 _recorded_rs_lengths = rs_lengths; 1369 } 1370 1371 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1372 double elapsed_ms) { 1373 _recent_gc_times_ms->add(elapsed_ms); 1374 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1375 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1376 } 1377 1378 size_t G1CollectorPolicy::expansion_amount() { 1379 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; 1380 double threshold = _gc_overhead_perc; 1381 if (recent_gc_overhead > threshold) { 1382 // We will double the existing space, or take 1383 // G1ExpandByPercentOfAvailable % of the available expansion 1384 // space, whichever is smaller, bounded below by a minimum 1385 // expansion (unless that's all that's left.) 1386 const size_t min_expand_bytes = 1*M; 1387 size_t reserved_bytes = _g1->max_capacity(); 1388 size_t committed_bytes = _g1->capacity(); 1389 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1390 size_t expand_bytes; 1391 size_t expand_bytes_via_pct = 1392 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1393 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1394 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1395 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1396 1397 ergo_verbose5(ErgoHeapSizing, 1398 "attempt heap expansion", 1399 ergo_format_reason("recent GC overhead higher than " 1400 "threshold after GC") 1401 ergo_format_perc("recent GC overhead") 1402 ergo_format_perc("threshold") 1403 ergo_format_byte("uncommitted") 1404 ergo_format_byte_perc("calculated expansion amount"), 1405 recent_gc_overhead, threshold, 1406 uncommitted_bytes, 1407 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable); 1408 1409 return expand_bytes; 1410 } else { 1411 return 0; 1412 } 1413 } 1414 1415 void G1CollectorPolicy::print_tracing_info() const { 1416 _trace_young_gen_time_data.print(); 1417 _trace_old_gen_time_data.print(); 1418 } 1419 1420 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1421 #ifndef PRODUCT 1422 _short_lived_surv_rate_group->print_surv_rate_summary(); 1423 // add this call for any other surv rate groups 1424 #endif // PRODUCT 1425 } 1426 1427 bool G1CollectorPolicy::is_young_list_full() { 1428 uint young_list_length = _g1->young_list()->length(); 1429 uint young_list_target_length = _young_list_target_length; 1430 return young_list_length >= young_list_target_length; 1431 } 1432 1433 bool G1CollectorPolicy::can_expand_young_list() { 1434 uint young_list_length = _g1->young_list()->length(); 1435 uint young_list_max_length = _young_list_max_length; 1436 return young_list_length < young_list_max_length; 1437 } 1438 1439 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1440 uint expansion_region_num = 0; 1441 if (GCLockerEdenExpansionPercent > 0) { 1442 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1443 double expansion_region_num_d = perc * (double) _young_list_target_length; 1444 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1445 // less than 1.0) we'll get 1. 1446 expansion_region_num = (uint) ceil(expansion_region_num_d); 1447 } else { 1448 assert(expansion_region_num == 0, "sanity"); 1449 } 1450 _young_list_max_length = _young_list_target_length + expansion_region_num; 1451 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1452 } 1453 1454 // Calculates survivor space parameters. 1455 void G1CollectorPolicy::update_survivors_policy() { 1456 double max_survivor_regions_d = 1457 (double) _young_list_target_length / (double) SurvivorRatio; 1458 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1459 // smaller than 1.0) we'll get 1. 1460 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1461 1462 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1463 HeapRegion::GrainWords * _max_survivor_regions); 1464 } 1465 1466 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle( 1467 GCCause::Cause gc_cause) { 1468 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1469 if (!during_cycle) { 1470 ergo_verbose1(ErgoConcCycles, 1471 "request concurrent cycle initiation", 1472 ergo_format_reason("requested by GC cause") 1473 ergo_format_str("GC cause"), 1474 GCCause::to_string(gc_cause)); 1475 set_initiate_conc_mark_if_possible(); 1476 return true; 1477 } else { 1478 ergo_verbose1(ErgoConcCycles, 1479 "do not request concurrent cycle initiation", 1480 ergo_format_reason("concurrent cycle already in progress") 1481 ergo_format_str("GC cause"), 1482 GCCause::to_string(gc_cause)); 1483 return false; 1484 } 1485 } 1486 1487 void 1488 G1CollectorPolicy::decide_on_conc_mark_initiation() { 1489 // We are about to decide on whether this pause will be an 1490 // initial-mark pause. 1491 1492 // First, during_initial_mark_pause() should not be already set. We 1493 // will set it here if we have to. However, it should be cleared by 1494 // the end of the pause (it's only set for the duration of an 1495 // initial-mark pause). 1496 assert(!during_initial_mark_pause(), "pre-condition"); 1497 1498 if (initiate_conc_mark_if_possible()) { 1499 // We had noticed on a previous pause that the heap occupancy has 1500 // gone over the initiating threshold and we should start a 1501 // concurrent marking cycle. So we might initiate one. 1502 1503 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1504 if (!during_cycle) { 1505 // The concurrent marking thread is not "during a cycle", i.e., 1506 // it has completed the last one. So we can go ahead and 1507 // initiate a new cycle. 1508 1509 set_during_initial_mark_pause(); 1510 // We do not allow mixed GCs during marking. 1511 if (!gcs_are_young()) { 1512 set_gcs_are_young(true); 1513 ergo_verbose0(ErgoMixedGCs, 1514 "end mixed GCs", 1515 ergo_format_reason("concurrent cycle is about to start")); 1516 } 1517 1518 // And we can now clear initiate_conc_mark_if_possible() as 1519 // we've already acted on it. 1520 clear_initiate_conc_mark_if_possible(); 1521 1522 ergo_verbose0(ErgoConcCycles, 1523 "initiate concurrent cycle", 1524 ergo_format_reason("concurrent cycle initiation requested")); 1525 } else { 1526 // The concurrent marking thread is still finishing up the 1527 // previous cycle. If we start one right now the two cycles 1528 // overlap. In particular, the concurrent marking thread might 1529 // be in the process of clearing the next marking bitmap (which 1530 // we will use for the next cycle if we start one). Starting a 1531 // cycle now will be bad given that parts of the marking 1532 // information might get cleared by the marking thread. And we 1533 // cannot wait for the marking thread to finish the cycle as it 1534 // periodically yields while clearing the next marking bitmap 1535 // and, if it's in a yield point, it's waiting for us to 1536 // finish. So, at this point we will not start a cycle and we'll 1537 // let the concurrent marking thread complete the last one. 1538 ergo_verbose0(ErgoConcCycles, 1539 "do not initiate concurrent cycle", 1540 ergo_format_reason("concurrent cycle already in progress")); 1541 } 1542 } 1543 } 1544 1545 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1546 G1CollectedHeap* _g1h; 1547 CSetChooserParUpdater _cset_updater; 1548 1549 public: 1550 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1551 uint chunk_size) : 1552 _g1h(G1CollectedHeap::heap()), 1553 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1554 1555 bool doHeapRegion(HeapRegion* r) { 1556 // Do we have any marking information for this region? 1557 if (r->is_marked()) { 1558 // We will skip any region that's currently used as an old GC 1559 // alloc region (we should not consider those for collection 1560 // before we fill them up). 1561 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1562 _cset_updater.add_region(r); 1563 } 1564 } 1565 return false; 1566 } 1567 }; 1568 1569 class ParKnownGarbageTask: public AbstractGangTask { 1570 CollectionSetChooser* _hrSorted; 1571 uint _chunk_size; 1572 G1CollectedHeap* _g1; 1573 HeapRegionClaimer _hrclaimer; 1574 1575 public: 1576 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : 1577 AbstractGangTask("ParKnownGarbageTask"), 1578 _hrSorted(hrSorted), _chunk_size(chunk_size), 1579 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} 1580 1581 void work(uint worker_id) { 1582 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1583 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); 1584 } 1585 }; 1586 1587 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) { 1588 assert(n_workers > 0, "Active gc workers should be greater than 0"); 1589 const uint overpartition_factor = 4; 1590 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); 1591 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); 1592 } 1593 1594 void 1595 G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) { 1596 _collectionSetChooser->clear(); 1597 1598 uint n_regions = _g1->num_regions(); 1599 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); 1600 _collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size); 1601 ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers); 1602 _g1->workers()->run_task(&par_known_garbage_task); 1603 1604 _collectionSetChooser->sort_regions(); 1605 1606 double end_sec = os::elapsedTime(); 1607 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1608 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1609 _cur_mark_stop_world_time_ms += elapsed_time_ms; 1610 _prev_collection_pause_end_ms += elapsed_time_ms; 1611 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true); 1612 } 1613 1614 // Add the heap region at the head of the non-incremental collection set 1615 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) { 1616 assert(_inc_cset_build_state == Active, "Precondition"); 1617 assert(hr->is_old(), "the region should be old"); 1618 1619 assert(!hr->in_collection_set(), "should not already be in the CSet"); 1620 hr->set_in_collection_set(true); 1621 hr->set_next_in_collection_set(_collection_set); 1622 _collection_set = hr; 1623 _collection_set_bytes_used_before += hr->used(); 1624 _g1->register_old_region_with_in_cset_fast_test(hr); 1625 size_t rs_length = hr->rem_set()->occupied(); 1626 _recorded_rs_lengths += rs_length; 1627 _old_cset_region_length += 1; 1628 } 1629 1630 // Initialize the per-collection-set information 1631 void G1CollectorPolicy::start_incremental_cset_building() { 1632 assert(_inc_cset_build_state == Inactive, "Precondition"); 1633 1634 _inc_cset_head = NULL; 1635 _inc_cset_tail = NULL; 1636 _inc_cset_bytes_used_before = 0; 1637 1638 _inc_cset_max_finger = 0; 1639 _inc_cset_recorded_rs_lengths = 0; 1640 _inc_cset_recorded_rs_lengths_diffs = 0; 1641 _inc_cset_predicted_elapsed_time_ms = 0.0; 1642 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1643 _inc_cset_build_state = Active; 1644 } 1645 1646 void G1CollectorPolicy::finalize_incremental_cset_building() { 1647 assert(_inc_cset_build_state == Active, "Precondition"); 1648 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1649 1650 // The two "main" fields, _inc_cset_recorded_rs_lengths and 1651 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread 1652 // that adds a new region to the CSet. Further updates by the 1653 // concurrent refinement thread that samples the young RSet lengths 1654 // are accumulated in the *_diffs fields. Here we add the diffs to 1655 // the "main" fields. 1656 1657 if (_inc_cset_recorded_rs_lengths_diffs >= 0) { 1658 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs; 1659 } else { 1660 // This is defensive. The diff should in theory be always positive 1661 // as RSets can only grow between GCs. However, given that we 1662 // sample their size concurrently with other threads updating them 1663 // it's possible that we might get the wrong size back, which 1664 // could make the calculations somewhat inaccurate. 1665 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs); 1666 if (_inc_cset_recorded_rs_lengths >= diffs) { 1667 _inc_cset_recorded_rs_lengths -= diffs; 1668 } else { 1669 _inc_cset_recorded_rs_lengths = 0; 1670 } 1671 } 1672 _inc_cset_predicted_elapsed_time_ms += 1673 _inc_cset_predicted_elapsed_time_ms_diffs; 1674 1675 _inc_cset_recorded_rs_lengths_diffs = 0; 1676 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1677 } 1678 1679 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { 1680 // This routine is used when: 1681 // * adding survivor regions to the incremental cset at the end of an 1682 // evacuation pause, 1683 // * adding the current allocation region to the incremental cset 1684 // when it is retired, and 1685 // * updating existing policy information for a region in the 1686 // incremental cset via young list RSet sampling. 1687 // Therefore this routine may be called at a safepoint by the 1688 // VM thread, or in-between safepoints by mutator threads (when 1689 // retiring the current allocation region) or a concurrent 1690 // refine thread (RSet sampling). 1691 1692 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 1693 size_t used_bytes = hr->used(); 1694 _inc_cset_recorded_rs_lengths += rs_length; 1695 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; 1696 _inc_cset_bytes_used_before += used_bytes; 1697 1698 // Cache the values we have added to the aggregated information 1699 // in the heap region in case we have to remove this region from 1700 // the incremental collection set, or it is updated by the 1701 // rset sampling code 1702 hr->set_recorded_rs_length(rs_length); 1703 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); 1704 } 1705 1706 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, 1707 size_t new_rs_length) { 1708 // Update the CSet information that is dependent on the new RS length 1709 assert(hr->is_young(), "Precondition"); 1710 assert(!SafepointSynchronize::is_at_safepoint(), 1711 "should not be at a safepoint"); 1712 1713 // We could have updated _inc_cset_recorded_rs_lengths and 1714 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do 1715 // that atomically, as this code is executed by a concurrent 1716 // refinement thread, potentially concurrently with a mutator thread 1717 // allocating a new region and also updating the same fields. To 1718 // avoid the atomic operations we accumulate these updates on two 1719 // separate fields (*_diffs) and we'll just add them to the "main" 1720 // fields at the start of a GC. 1721 1722 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length(); 1723 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length; 1724 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff; 1725 1726 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); 1727 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 1728 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms; 1729 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff; 1730 1731 hr->set_recorded_rs_length(new_rs_length); 1732 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms); 1733 } 1734 1735 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { 1736 assert(hr->is_young(), "invariant"); 1737 assert(hr->young_index_in_cset() > -1, "should have already been set"); 1738 assert(_inc_cset_build_state == Active, "Precondition"); 1739 1740 // We need to clear and set the cached recorded/cached collection set 1741 // information in the heap region here (before the region gets added 1742 // to the collection set). An individual heap region's cached values 1743 // are calculated, aggregated with the policy collection set info, 1744 // and cached in the heap region here (initially) and (subsequently) 1745 // by the Young List sampling code. 1746 1747 size_t rs_length = hr->rem_set()->occupied(); 1748 add_to_incremental_cset_info(hr, rs_length); 1749 1750 HeapWord* hr_end = hr->end(); 1751 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end); 1752 1753 assert(!hr->in_collection_set(), "invariant"); 1754 hr->set_in_collection_set(true); 1755 assert( hr->next_in_collection_set() == NULL, "invariant"); 1756 1757 _g1->register_young_region_with_in_cset_fast_test(hr); 1758 } 1759 1760 // Add the region at the RHS of the incremental cset 1761 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { 1762 // We should only ever be appending survivors at the end of a pause 1763 assert(hr->is_survivor(), "Logic"); 1764 1765 // Do the 'common' stuff 1766 add_region_to_incremental_cset_common(hr); 1767 1768 // Now add the region at the right hand side 1769 if (_inc_cset_tail == NULL) { 1770 assert(_inc_cset_head == NULL, "invariant"); 1771 _inc_cset_head = hr; 1772 } else { 1773 _inc_cset_tail->set_next_in_collection_set(hr); 1774 } 1775 _inc_cset_tail = hr; 1776 } 1777 1778 // Add the region to the LHS of the incremental cset 1779 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { 1780 // Survivors should be added to the RHS at the end of a pause 1781 assert(hr->is_eden(), "Logic"); 1782 1783 // Do the 'common' stuff 1784 add_region_to_incremental_cset_common(hr); 1785 1786 // Add the region at the left hand side 1787 hr->set_next_in_collection_set(_inc_cset_head); 1788 if (_inc_cset_head == NULL) { 1789 assert(_inc_cset_tail == NULL, "Invariant"); 1790 _inc_cset_tail = hr; 1791 } 1792 _inc_cset_head = hr; 1793 } 1794 1795 #ifndef PRODUCT 1796 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { 1797 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); 1798 1799 st->print_cr("\nCollection_set:"); 1800 HeapRegion* csr = list_head; 1801 while (csr != NULL) { 1802 HeapRegion* next = csr->next_in_collection_set(); 1803 assert(csr->in_collection_set(), "bad CS"); 1804 st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d", 1805 HR_FORMAT_PARAMS(csr), 1806 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(), 1807 csr->age_in_surv_rate_group_cond()); 1808 csr = next; 1809 } 1810 } 1811 #endif // !PRODUCT 1812 1813 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) { 1814 // Returns the given amount of reclaimable bytes (that represents 1815 // the amount of reclaimable space still to be collected) as a 1816 // percentage of the current heap capacity. 1817 size_t capacity_bytes = _g1->capacity(); 1818 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 1819 } 1820 1821 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 1822 const char* false_action_str) { 1823 CollectionSetChooser* cset_chooser = _collectionSetChooser; 1824 if (cset_chooser->is_empty()) { 1825 ergo_verbose0(ErgoMixedGCs, 1826 false_action_str, 1827 ergo_format_reason("candidate old regions not available")); 1828 return false; 1829 } 1830 1831 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 1832 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 1833 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 1834 double threshold = (double) G1HeapWastePercent; 1835 if (reclaimable_perc <= threshold) { 1836 ergo_verbose4(ErgoMixedGCs, 1837 false_action_str, 1838 ergo_format_reason("reclaimable percentage not over threshold") 1839 ergo_format_region("candidate old regions") 1840 ergo_format_byte_perc("reclaimable") 1841 ergo_format_perc("threshold"), 1842 cset_chooser->remaining_regions(), 1843 reclaimable_bytes, 1844 reclaimable_perc, threshold); 1845 return false; 1846 } 1847 1848 ergo_verbose4(ErgoMixedGCs, 1849 true_action_str, 1850 ergo_format_reason("candidate old regions available") 1851 ergo_format_region("candidate old regions") 1852 ergo_format_byte_perc("reclaimable") 1853 ergo_format_perc("threshold"), 1854 cset_chooser->remaining_regions(), 1855 reclaimable_bytes, 1856 reclaimable_perc, threshold); 1857 return true; 1858 } 1859 1860 uint G1CollectorPolicy::calc_min_old_cset_length() { 1861 // The min old CSet region bound is based on the maximum desired 1862 // number of mixed GCs after a cycle. I.e., even if some old regions 1863 // look expensive, we should add them to the CSet anyway to make 1864 // sure we go through the available old regions in no more than the 1865 // maximum desired number of mixed GCs. 1866 // 1867 // The calculation is based on the number of marked regions we added 1868 // to the CSet chooser in the first place, not how many remain, so 1869 // that the result is the same during all mixed GCs that follow a cycle. 1870 1871 const size_t region_num = (size_t) _collectionSetChooser->length(); 1872 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 1873 size_t result = region_num / gc_num; 1874 // emulate ceiling 1875 if (result * gc_num < region_num) { 1876 result += 1; 1877 } 1878 return (uint) result; 1879 } 1880 1881 uint G1CollectorPolicy::calc_max_old_cset_length() { 1882 // The max old CSet region bound is based on the threshold expressed 1883 // as a percentage of the heap size. I.e., it should bound the 1884 // number of old regions added to the CSet irrespective of how many 1885 // of them are available. 1886 1887 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1888 const size_t region_num = g1h->num_regions(); 1889 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 1890 size_t result = region_num * perc / 100; 1891 // emulate ceiling 1892 if (100 * result < region_num * perc) { 1893 result += 1; 1894 } 1895 return (uint) result; 1896 } 1897 1898 1899 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) { 1900 double young_start_time_sec = os::elapsedTime(); 1901 1902 YoungList* young_list = _g1->young_list(); 1903 finalize_incremental_cset_building(); 1904 1905 guarantee(target_pause_time_ms > 0.0, 1906 err_msg("target_pause_time_ms = %1.6lf should be positive", 1907 target_pause_time_ms)); 1908 guarantee(_collection_set == NULL, "Precondition"); 1909 1910 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); 1911 double predicted_pause_time_ms = base_time_ms; 1912 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); 1913 1914 ergo_verbose4(ErgoCSetConstruction | ErgoHigh, 1915 "start choosing CSet", 1916 ergo_format_size("_pending_cards") 1917 ergo_format_ms("predicted base time") 1918 ergo_format_ms("remaining time") 1919 ergo_format_ms("target pause time"), 1920 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms); 1921 1922 _last_gc_was_young = gcs_are_young() ? true : false; 1923 1924 if (_last_gc_was_young) { 1925 _trace_young_gen_time_data.increment_young_collection_count(); 1926 } else { 1927 _trace_young_gen_time_data.increment_mixed_collection_count(); 1928 } 1929 1930 // The young list is laid with the survivor regions from the previous 1931 // pause are appended to the RHS of the young list, i.e. 1932 // [Newly Young Regions ++ Survivors from last pause]. 1933 1934 uint survivor_region_length = young_list->survivor_length(); 1935 uint eden_region_length = young_list->length() - survivor_region_length; 1936 init_cset_region_lengths(eden_region_length, survivor_region_length); 1937 1938 HeapRegion* hr = young_list->first_survivor_region(); 1939 while (hr != NULL) { 1940 assert(hr->is_survivor(), "badly formed young list"); 1941 // There is a convention that all the young regions in the CSet 1942 // are tagged as "eden", so we do this for the survivors here. We 1943 // use the special set_eden_pre_gc() as it doesn't check that the 1944 // region is free (which is not the case here). 1945 hr->set_eden_pre_gc(); 1946 hr = hr->get_next_young_region(); 1947 } 1948 1949 // Clear the fields that point to the survivor list - they are all young now. 1950 young_list->clear_survivors(); 1951 1952 _collection_set = _inc_cset_head; 1953 _collection_set_bytes_used_before = _inc_cset_bytes_used_before; 1954 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0); 1955 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms; 1956 1957 ergo_verbose3(ErgoCSetConstruction | ErgoHigh, 1958 "add young regions to CSet", 1959 ergo_format_region("eden") 1960 ergo_format_region("survivors") 1961 ergo_format_ms("predicted young region time"), 1962 eden_region_length, survivor_region_length, 1963 _inc_cset_predicted_elapsed_time_ms); 1964 1965 // The number of recorded young regions is the incremental 1966 // collection set's current size 1967 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); 1968 1969 double young_end_time_sec = os::elapsedTime(); 1970 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0); 1971 1972 // Set the start of the non-young choice time. 1973 double non_young_start_time_sec = young_end_time_sec; 1974 1975 if (!gcs_are_young()) { 1976 CollectionSetChooser* cset_chooser = _collectionSetChooser; 1977 cset_chooser->verify(); 1978 const uint min_old_cset_length = calc_min_old_cset_length(); 1979 const uint max_old_cset_length = calc_max_old_cset_length(); 1980 1981 uint expensive_region_num = 0; 1982 bool check_time_remaining = adaptive_young_list_length(); 1983 1984 HeapRegion* hr = cset_chooser->peek(); 1985 while (hr != NULL) { 1986 if (old_cset_region_length() >= max_old_cset_length) { 1987 // Added maximum number of old regions to the CSet. 1988 ergo_verbose2(ErgoCSetConstruction, 1989 "finish adding old regions to CSet", 1990 ergo_format_reason("old CSet region num reached max") 1991 ergo_format_region("old") 1992 ergo_format_region("max"), 1993 old_cset_region_length(), max_old_cset_length); 1994 break; 1995 } 1996 1997 1998 // Stop adding regions if the remaining reclaimable space is 1999 // not above G1HeapWastePercent. 2000 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 2001 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 2002 double threshold = (double) G1HeapWastePercent; 2003 if (reclaimable_perc <= threshold) { 2004 // We've added enough old regions that the amount of uncollected 2005 // reclaimable space is at or below the waste threshold. Stop 2006 // adding old regions to the CSet. 2007 ergo_verbose5(ErgoCSetConstruction, 2008 "finish adding old regions to CSet", 2009 ergo_format_reason("reclaimable percentage not over threshold") 2010 ergo_format_region("old") 2011 ergo_format_region("max") 2012 ergo_format_byte_perc("reclaimable") 2013 ergo_format_perc("threshold"), 2014 old_cset_region_length(), 2015 max_old_cset_length, 2016 reclaimable_bytes, 2017 reclaimable_perc, threshold); 2018 break; 2019 } 2020 2021 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 2022 if (check_time_remaining) { 2023 if (predicted_time_ms > time_remaining_ms) { 2024 // Too expensive for the current CSet. 2025 2026 if (old_cset_region_length() >= min_old_cset_length) { 2027 // We have added the minimum number of old regions to the CSet, 2028 // we are done with this CSet. 2029 ergo_verbose4(ErgoCSetConstruction, 2030 "finish adding old regions to CSet", 2031 ergo_format_reason("predicted time is too high") 2032 ergo_format_ms("predicted time") 2033 ergo_format_ms("remaining time") 2034 ergo_format_region("old") 2035 ergo_format_region("min"), 2036 predicted_time_ms, time_remaining_ms, 2037 old_cset_region_length(), min_old_cset_length); 2038 break; 2039 } 2040 2041 // We'll add it anyway given that we haven't reached the 2042 // minimum number of old regions. 2043 expensive_region_num += 1; 2044 } 2045 } else { 2046 if (old_cset_region_length() >= min_old_cset_length) { 2047 // In the non-auto-tuning case, we'll finish adding regions 2048 // to the CSet if we reach the minimum. 2049 ergo_verbose2(ErgoCSetConstruction, 2050 "finish adding old regions to CSet", 2051 ergo_format_reason("old CSet region num reached min") 2052 ergo_format_region("old") 2053 ergo_format_region("min"), 2054 old_cset_region_length(), min_old_cset_length); 2055 break; 2056 } 2057 } 2058 2059 // We will add this region to the CSet. 2060 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); 2061 predicted_pause_time_ms += predicted_time_ms; 2062 cset_chooser->remove_and_move_to_next(hr); 2063 _g1->old_set_remove(hr); 2064 add_old_region_to_cset(hr); 2065 2066 hr = cset_chooser->peek(); 2067 } 2068 if (hr == NULL) { 2069 ergo_verbose0(ErgoCSetConstruction, 2070 "finish adding old regions to CSet", 2071 ergo_format_reason("candidate old regions not available")); 2072 } 2073 2074 if (expensive_region_num > 0) { 2075 // We print the information once here at the end, predicated on 2076 // whether we added any apparently expensive regions or not, to 2077 // avoid generating output per region. 2078 ergo_verbose4(ErgoCSetConstruction, 2079 "added expensive regions to CSet", 2080 ergo_format_reason("old CSet region num not reached min") 2081 ergo_format_region("old") 2082 ergo_format_region("expensive") 2083 ergo_format_region("min") 2084 ergo_format_ms("remaining time"), 2085 old_cset_region_length(), 2086 expensive_region_num, 2087 min_old_cset_length, 2088 time_remaining_ms); 2089 } 2090 2091 cset_chooser->verify(); 2092 } 2093 2094 stop_incremental_cset_building(); 2095 2096 ergo_verbose5(ErgoCSetConstruction, 2097 "finish choosing CSet", 2098 ergo_format_region("eden") 2099 ergo_format_region("survivors") 2100 ergo_format_region("old") 2101 ergo_format_ms("predicted pause time") 2102 ergo_format_ms("target pause time"), 2103 eden_region_length, survivor_region_length, 2104 old_cset_region_length(), 2105 predicted_pause_time_ms, target_pause_time_ms); 2106 2107 double non_young_end_time_sec = os::elapsedTime(); 2108 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); 2109 evacuation_info.set_collectionset_regions(cset_region_length()); 2110 } 2111 2112 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) { 2113 if(TraceYoungGenTime) { 2114 _all_stop_world_times_ms.add(time_to_stop_the_world_ms); 2115 } 2116 } 2117 2118 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) { 2119 if(TraceYoungGenTime) { 2120 _all_yield_times_ms.add(yield_time_ms); 2121 } 2122 } 2123 2124 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) { 2125 if(TraceYoungGenTime) { 2126 _total.add(pause_time_ms); 2127 _other.add(pause_time_ms - phase_times->accounted_time_ms()); 2128 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms()); 2129 _parallel.add(phase_times->cur_collection_par_time_ms()); 2130 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time()); 2131 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms()); 2132 _update_rs.add(phase_times->average_last_update_rs_time()); 2133 _scan_rs.add(phase_times->average_last_scan_rs_time()); 2134 _obj_copy.add(phase_times->average_last_obj_copy_time()); 2135 _termination.add(phase_times->average_last_termination_time()); 2136 2137 double parallel_known_time = phase_times->average_last_ext_root_scan_time() + 2138 phase_times->average_last_satb_filtering_times_ms() + 2139 phase_times->average_last_update_rs_time() + 2140 phase_times->average_last_scan_rs_time() + 2141 phase_times->average_last_obj_copy_time() + 2142 + phase_times->average_last_termination_time(); 2143 2144 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time; 2145 _parallel_other.add(parallel_other_time); 2146 _clear_ct.add(phase_times->cur_clear_ct_time_ms()); 2147 } 2148 } 2149 2150 void TraceYoungGenTimeData::increment_young_collection_count() { 2151 if(TraceYoungGenTime) { 2152 ++_young_pause_num; 2153 } 2154 } 2155 2156 void TraceYoungGenTimeData::increment_mixed_collection_count() { 2157 if(TraceYoungGenTime) { 2158 ++_mixed_pause_num; 2159 } 2160 } 2161 2162 void TraceYoungGenTimeData::print_summary(const char* str, 2163 const NumberSeq* seq) const { 2164 double sum = seq->sum(); 2165 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)", 2166 str, sum / 1000.0, seq->avg()); 2167 } 2168 2169 void TraceYoungGenTimeData::print_summary_sd(const char* str, 2170 const NumberSeq* seq) const { 2171 print_summary(str, seq); 2172 gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", 2173 "(num", seq->num(), seq->sd(), seq->maximum()); 2174 } 2175 2176 void TraceYoungGenTimeData::print() const { 2177 if (!TraceYoungGenTime) { 2178 return; 2179 } 2180 2181 gclog_or_tty->print_cr("ALL PAUSES"); 2182 print_summary_sd(" Total", &_total); 2183 gclog_or_tty->cr(); 2184 gclog_or_tty->cr(); 2185 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num); 2186 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num); 2187 gclog_or_tty->cr(); 2188 2189 gclog_or_tty->print_cr("EVACUATION PAUSES"); 2190 2191 if (_young_pause_num == 0 && _mixed_pause_num == 0) { 2192 gclog_or_tty->print_cr("none"); 2193 } else { 2194 print_summary_sd(" Evacuation Pauses", &_total); 2195 print_summary(" Root Region Scan Wait", &_root_region_scan_wait); 2196 print_summary(" Parallel Time", &_parallel); 2197 print_summary(" Ext Root Scanning", &_ext_root_scan); 2198 print_summary(" SATB Filtering", &_satb_filtering); 2199 print_summary(" Update RS", &_update_rs); 2200 print_summary(" Scan RS", &_scan_rs); 2201 print_summary(" Object Copy", &_obj_copy); 2202 print_summary(" Termination", &_termination); 2203 print_summary(" Parallel Other", &_parallel_other); 2204 print_summary(" Clear CT", &_clear_ct); 2205 print_summary(" Other", &_other); 2206 } 2207 gclog_or_tty->cr(); 2208 2209 gclog_or_tty->print_cr("MISC"); 2210 print_summary_sd(" Stop World", &_all_stop_world_times_ms); 2211 print_summary_sd(" Yields", &_all_yield_times_ms); 2212 } 2213 2214 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) { 2215 if (TraceOldGenTime) { 2216 _all_full_gc_times.add(full_gc_time_ms); 2217 } 2218 } 2219 2220 void TraceOldGenTimeData::print() const { 2221 if (!TraceOldGenTime) { 2222 return; 2223 } 2224 2225 if (_all_full_gc_times.num() > 0) { 2226 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s", 2227 _all_full_gc_times.num(), 2228 _all_full_gc_times.sum() / 1000.0); 2229 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg()); 2230 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]", 2231 _all_full_gc_times.sd(), 2232 _all_full_gc_times.maximum()); 2233 } 2234 }