/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different number of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                       ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_used_bytes_before_gc(0),
  _survivor_used_bytes_before_gc(0),
  _heap_used_bytes_before_gc(0),
  _metaspace_used_bytes_before_gc(0),
  _eden_capacity_bytes_before_gc(0),
  _heap_capacity_bytes_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
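  // For example, with a 4 GB maximum heap the ergonomics in
  // setup_heap_region_size() typically end up with 2 MB regions (roughly
  // 2048 regions in total); the exact choice depends on the heap bounds.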
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // set the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
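  // With neither flag set on the command line this resolves to the
  // defaults established below: MaxGCPauseMillis = 200 and
  // GCPauseIntervalMillis = 200 + 1 = 201, i.e., a 200ms pause target
  // inside a 201ms interval.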
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  uintx confidence_perc = G1ConfidencePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (confidence_perc > 100) {
    confidence_perc = 100;
    warning("G1ConfidencePercent is set to a value that is too large, "
            "it's been updated to %u", confidence_perc);
  }
  _sigma = (double) confidence_perc / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max");
  assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds");
  assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
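  // Note that if the young gen size was fixed on the command line,
  // adaptive_young_list_length() reports false and the target length
  // computed below stays pinned at the sizer's minimum desired length.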
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                       uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
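  // The bound below ultimately derives from G1MaxNewSizePercent (or an
  // explicit MaxNewSize) via the young gen sizer.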
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.
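      // For example, with min_young_length = 10 known to fit and
      // max_young_length = 100 known not to, the first probe is
      // 10 + (100 - 10) / 2 = 55; the search then continues on
      // [55, 100) or [10, 55) depending on whether 55 fits, until
      // diff reaches 0.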

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup* surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there's no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_gen0_time_data.record_yield_time(yield_ms);
  }
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young() && !_last_young_gc) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, false);

  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration nor when the next pause will take
    // place we can safely ignore them here.
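    // E.g., 40 eden regions allocated over a 2000ms mutator interval
    // gives an allocation rate of 0.02 regions/ms.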
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }

  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (during_initial_mark_pause()) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
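    // Whether mixed GCs continue depends on how much reclaimable space
    // is left in the old-region candidates; see next_gc_should_be_mixed().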

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                          (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                            (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                       (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
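  // E.g., with a 200ms pause target and G1RSetUpdatingPauseTimePercent
  // at its default of 10, the goal below works out to 20ms per pause.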
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  YoungList* young_list = _g1->young_list();
  _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  _heap_capacity_bytes_before_gc = _g1->capacity();
  _heap_used_bytes_before_gc = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();

  _eden_capacity_bytes_before_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;

  if (full) {
    _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
  }
}

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
                             _heap_used_bytes_before_gc,
                             _g1->used(),
                             _g1->capacity());
}

void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
  YoungList* young_list = _g1->young_list();

  size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
  size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  size_t heap_used_bytes_after_gc = _g1->used();

  size_t heap_capacity_bytes_after_gc = _g1->capacity();
  size_t eden_capacity_bytes_after_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;

  gclog_or_tty->print(
    " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
    "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
    "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
    EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));

  if (full) {
    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  }

  gclog_or_tty->cr();
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine* cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
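      // With dec_k = 0.9 the green zone shrinks by roughly 10% whenever
      // update RS work overshoots the goal; the inc_k = 1.1 branch below
      // grows it by roughly 10% (and by at least one) when there is headroom.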
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool for_young_gc) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                            uint survivor_cset_region_length) {
  _eden_cset_region_length = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

uint G1CollectorPolicy::max_regions(int purpose) {
  switch (purpose) {
    case GCAllocForSurvived:
      return _max_survivor_regions;
    case GCAllocForTenured:
      return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED;
  };
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
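    // E.g., a 50 region target with GCLockerEdenExpansionPercent = 10
    // permits ceil(5.0) = 5 extra eden regions for GC-locker-induced
    // allocation.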
1401 expansion_region_num = (uint) ceil(expansion_region_num_d); 1402 } else { 1403 assert(expansion_region_num == 0, "sanity"); 1404 } 1405 _young_list_max_length = _young_list_target_length + expansion_region_num; 1406 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1407 } 1408 1409 // Calculates survivor space parameters. 1410 void G1CollectorPolicy::update_survivors_policy() { 1411 double max_survivor_regions_d = 1412 (double) _young_list_target_length / (double) SurvivorRatio; 1413 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1414 // smaller than 1.0) we'll get 1. 1415 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1416 1417 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1418 HeapRegion::GrainWords * _max_survivor_regions); 1419 } 1420 1421 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle( 1422 GCCause::Cause gc_cause) { 1423 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1424 if (!during_cycle) { 1425 ergo_verbose1(ErgoConcCycles, 1426 "request concurrent cycle initiation", 1427 ergo_format_reason("requested by GC cause") 1428 ergo_format_str("GC cause"), 1429 GCCause::to_string(gc_cause)); 1430 set_initiate_conc_mark_if_possible(); 1431 return true; 1432 } else { 1433 ergo_verbose1(ErgoConcCycles, 1434 "do not request concurrent cycle initiation", 1435 ergo_format_reason("concurrent cycle already in progress") 1436 ergo_format_str("GC cause"), 1437 GCCause::to_string(gc_cause)); 1438 return false; 1439 } 1440 } 1441 1442 void 1443 G1CollectorPolicy::decide_on_conc_mark_initiation() { 1444 // We are about to decide on whether this pause will be an 1445 // initial-mark pause. 1446 1447 // First, during_initial_mark_pause() should not be already set. We 1448 // will set it here if we have to. However, it should be cleared by 1449 // the end of the pause (it's only set for the duration of an 1450 // initial-mark pause). 1451 assert(!during_initial_mark_pause(), "pre-condition"); 1452 1453 if (initiate_conc_mark_if_possible()) { 1454 // We had noticed on a previous pause that the heap occupancy has 1455 // gone over the initiating threshold and we should start a 1456 // concurrent marking cycle. So we might initiate one. 1457 1458 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1459 if (!during_cycle) { 1460 // The concurrent marking thread is not "during a cycle", i.e., 1461 // it has completed the last one. So we can go ahead and 1462 // initiate a new cycle. 1463 1464 set_during_initial_mark_pause(); 1465 // We do not allow mixed GCs during marking. 1466 if (!gcs_are_young()) { 1467 set_gcs_are_young(true); 1468 ergo_verbose0(ErgoMixedGCs, 1469 "end mixed GCs", 1470 ergo_format_reason("concurrent cycle is about to start")); 1471 } 1472 1473 // And we can now clear initiate_conc_mark_if_possible() as 1474 // we've already acted on it. 1475 clear_initiate_conc_mark_if_possible(); 1476 1477 ergo_verbose0(ErgoConcCycles, 1478 "initiate concurrent cycle", 1479 ergo_format_reason("concurrent cycle initiation requested")); 1480 } else { 1481 // The concurrent marking thread is still finishing up the 1482 // previous cycle. If we start one right now the two cycles 1483 // overlap. In particular, the concurrent marking thread might 1484 // be in the process of clearing the next marking bitmap (which 1485 // we will use for the next cycle if we start one). 
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                     GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();
      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we started one right now the two cycles
      // would overlap. In particular, the concurrent marking thread
      // might be in the process of clearing the next marking bitmap
      // (which we will use for the next cycle if we start one).
      // Starting a cycle now would be bad given that parts of the
      // marking information might get cleared by the marking thread.
      // And we cannot wait for the marking thread to finish the cycle
      // as it periodically yields while clearing the next marking
      // bitmap and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}
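
// A minimal sketch (hypothetical, standalone helper) of the decision made by
// decide_on_conc_mark_initiation() above: a new concurrent cycle is only
// initiated when one was requested by a previous pause AND the concurrent
// marking thread has finished the previous cycle.
static bool should_initiate_conc_mark_sketch(bool initiation_requested,
                                             bool marking_thread_in_cycle) {
  return initiation_requested && !marking_thread_in_cycle;
}
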
class KnownGarbageClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CollectionSetChooser* _hrSorted;

public:
  KnownGarbageClosure(CollectionSetChooser* hrSorted) :
    _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _hrSorted->add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool doHeapRegion(HeapRegion* r) {
    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _cset_updater.add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap()) { }

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);

    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                         _g1->workers()->active_workers(),
                                         HeapRegion::InitialClaimValue);
  }
};

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  _collectionSetChooser->clear();

  uint region_num = _g1->n_regions();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const uint OverpartitionFactor = 4;
    uint WorkUnit;
    // The use of MinChunkSize = 8 in the original code
    // causes some assertion failures when the total number of
    // regions is less than 8. The code here tries to fix that.
    // Should the original code also be fixed?
    if (no_of_gc_threads > 0) {
      const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
      WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
                      MinWorkUnit);
    } else {
      assert(no_of_gc_threads > 0,
             "The active gc workers should be greater than 0");
      // In a product build do something reasonable to avoid a crash.
      const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
      WorkUnit =
        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
    }
    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
                                                           WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }

  _collectionSetChooser->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
}
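
// A minimal sketch (hypothetical, standalone helper) of the work partitioning
// above: with region_num = 100 and n_threads = 8, MinWorkUnit is
// MAX2(100 / 8, 1U) = 12 and the chosen work unit is MAX2(100 / 32, 12) = 12
// regions per claimed chunk.
static uint work_unit_sketch(uint region_num, uint n_threads) {
  const uint OverpartitionFactor = 4;
  const uint MinWorkUnit = MAX2(region_num / n_threads, 1U);
  return MAX2(region_num / (n_threads * OverpartitionFactor), MinWorkUnit);
}
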
// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(!hr->is_young(), "non-incremental add of young region");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_bytes_used_before += hr->used();
  _g1->register_region_with_in_cset_fast_test(hr);
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_cset_region_length += 1;
}

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms = 0.0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_cset_build_state = Active;
}

void G1CollectorPolicy::finalize_incremental_cset_building() {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should in theory always be positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
    if (_inc_cset_recorded_rs_lengths >= diffs) {
      _inc_cset_recorded_rs_lengths -= diffs;
    } else {
      _inc_cset_recorded_rs_lengths = 0;
    }
  }
  _inc_cset_predicted_elapsed_time_ms +=
                                     _inc_cset_predicted_elapsed_time_ms_diffs;

  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}
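
// A minimal sketch (hypothetical, standalone helper) of the fold above: a
// non-negative diff is simply added; a negative diff is subtracted with
// clamping at zero, since a concurrent sample may report a value smaller
// than what was previously recorded.
static size_t fold_rs_length_diff_sketch(size_t recorded, ssize_t diff) {
  if (diff >= 0) {
    return recorded + (size_t) diff;
  }
  size_t dec = (size_t) -diff;
  return (recorded >= dec) ? recorded - dec : 0;
}
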
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  size_t used_bytes = hr->used();
  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // RSet sampling code.
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}

void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(),
         "should not be at a safepoint");

  // We could have updated _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert(hr->next_in_collection_set() == NULL, "invariant");

  _g1->register_region_with_in_cset_fast_test(hr);
}

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(!hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}
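
// Net effect of the two routines above (illustrative example): survivors are
// appended on the right at the end of a pause, while eden regions are
// prepended on the left as they are retired. Appending survivor S1 and then
// retiring eden regions E1 and E2 yields the list E2 -> E1 -> S1, i.e.
// [Newly Young Regions ++ Survivors from last pause].
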
#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
                 HR_FORMAT_PARAMS(csr),
                 csr->prev_top_at_mark_start(),
                 csr->next_top_at_mark_start(),
                 csr->age_in_surv_rate_group_cond());
    csr = next;
  }
}
#endif // !PRODUCT

double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
  // Returns the given amount of reclaimable bytes (that represents
  // the amount of reclaimable space still to be collected) as a
  // percentage of the current heap capacity.
  size_t capacity_bytes = _g1->capacity();
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}

bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) {
  CollectionSetChooser* cset_chooser = _collectionSetChooser;
  if (cset_chooser->is_empty()) {
    ergo_verbose0(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("candidate old regions not available"));
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    ergo_verbose4(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("reclaimable percentage not over threshold")
                  ergo_format_region("candidate old regions")
                  ergo_format_byte_perc("reclaimable")
                  ergo_format_perc("threshold"),
                  cset_chooser->remaining_regions(),
                  reclaimable_bytes,
                  reclaimable_perc, threshold);
    return false;
  }

  ergo_verbose4(ErgoMixedGCs,
                true_action_str,
                ergo_format_reason("candidate old regions available")
                ergo_format_region("candidate old regions")
                ergo_format_byte_perc("reclaimable")
                ergo_format_perc("threshold"),
                cset_chooser->remaining_regions(),
                reclaimable_bytes,
                reclaimable_perc, threshold);
  return true;
}

uint G1CollectorPolicy::calc_min_old_cset_length() {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) _collectionSetChooser->length();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1CollectorPolicy::calc_max_old_cset_length() {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->n_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}
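
// A minimal sketch (hypothetical, standalone helper) of the integer ceiling
// both bounds above emulate. For example, ceil_div_sketch(100, 8) = 13, so
// with 100 candidate regions and G1MixedGCCountTarget = 8 at least 13 old
// regions go into each mixed GC; with 1000 heap regions and
// G1OldCSetRegionThresholdPercent = 10, at most ceil(1000 * 10 / 100) = 100.
static size_t ceil_div_sketch(size_t numerator, size_t denominator) {
  size_t result = numerator / denominator;
  if (result * denominator < numerator) {
    result += 1; // round up when there is a remainder
  }
  return result;
}
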
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_size("_pending_cards")
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  _last_gc_was_young = gcs_are_young();

  if (_last_gc_was_young) {
    _trace_gen0_time_data.increment_young_collection_count();
  } else {
    _trace_gen0_time_data.increment_mixed_collection_count();
  }

  // The young list is laid out with the survivor regions from the
  // previous pause appended to its RHS, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
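
  // Illustrative budget arithmetic for the steps above (hypothetical numbers
  // only): with a 200ms pause target and a predicted base time of 50ms we
  // start with 150ms remaining; if the mandatory young regions are predicted
  // to take 120ms, 30ms are left for the optional old regions chosen below.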
  // Set the start of the non-young choice time.
  double non_young_start_time_sec = young_end_time_sec;

  if (!gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    cset_chooser->verify();
    const uint min_old_cset_length = calc_min_old_cset_length();
    const uint max_old_cset_length = calc_max_old_cset_length();

    uint expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();

    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        ergo_verbose5(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("reclaimable percentage not over threshold")
                      ergo_format_region("old")
                      ergo_format_region("max")
                      ergo_format_byte_perc("reclaimable")
                      ergo_format_perc("threshold"),
                      old_cset_region_length(),
                      max_old_cset_length,
                      reclaimable_bytes,
                      reclaimable_perc, threshold);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_pause_time_ms += predicted_time_ms;
      cset_chooser->remove_and_move_to_next(hr);
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    cset_chooser->verify();
  }

  stop_incremental_cset_building();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
  evacuation_info.set_collectionset_regions(cset_region_length());
}

void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
  if (TraceGen0Time) {
    _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
  }
}

void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
  if (TraceGen0Time) {
    _all_yield_times_ms.add(yield_time_ms);
  }
}

void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  if (TraceGen0Time) {
    _total.add(pause_time_ms);
    _other.add(pause_time_ms - phase_times->accounted_time_ms());
    _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
    _parallel.add(phase_times->cur_collection_par_time_ms());
    _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
    _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
    _update_rs.add(phase_times->average_last_update_rs_time());
    _scan_rs.add(phase_times->average_last_scan_rs_time());
    _obj_copy.add(phase_times->average_last_obj_copy_time());
    _termination.add(phase_times->average_last_termination_time());

    double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
      phase_times->average_last_satb_filtering_times_ms() +
      phase_times->average_last_update_rs_time() +
      phase_times->average_last_scan_rs_time() +
      phase_times->average_last_obj_copy_time() +
      phase_times->average_last_termination_time();

    double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
    _parallel_other.add(parallel_other_time);
    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  }
}
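
// A minimal sketch (hypothetical, standalone helper) of the accounting above:
// "parallel other" is the residual of the measured parallel time once the
// individually-timed sub-phases are subtracted, so the buckets sum back to
// the parallel total.
static double parallel_other_sketch(double parallel_total_ms,
                                    const double* sub_phase_ms, int n) {
  double known_ms = 0.0;
  for (int i = 0; i < n; i++) {
    known_ms += sub_phase_ms[i];
  }
  return parallel_total_ms - known_ms;
}
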
void TraceGen0TimeData::increment_young_collection_count() {
  if (TraceGen0Time) {
    ++_young_pause_num;
  }
}

void TraceGen0TimeData::increment_mixed_collection_count() {
  if (TraceGen0Time) {
    ++_mixed_pause_num;
  }
}

void TraceGen0TimeData::print_summary(const char* str,
                                      const NumberSeq* seq) const {
  double sum = seq->sum();
  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
                         str, sum / 1000.0, seq->avg());
}

void TraceGen0TimeData::print_summary_sd(const char* str,
                                         const NumberSeq* seq) const {
  print_summary(str, seq);
  gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                         "(num", seq->num(), seq->sd(), seq->maximum());
}

void TraceGen0TimeData::print() const {
  if (!TraceGen0Time) {
    return;
  }

  gclog_or_tty->print_cr("ALL PAUSES");
  print_summary_sd("   Total", &_total);
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    gclog_or_tty->print_cr("none");
  } else {
    print_summary_sd("   Evacuation Pauses", &_total);
    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
    print_summary("      Parallel Time", &_parallel);
    print_summary("         Ext Root Scanning", &_ext_root_scan);
    print_summary("         SATB Filtering", &_satb_filtering);
    print_summary("         Update RS", &_update_rs);
    print_summary("         Scan RS", &_scan_rs);
    print_summary("         Object Copy", &_obj_copy);
    print_summary("         Termination", &_termination);
    print_summary("         Parallel Other", &_parallel_other);
    print_summary("      Clear CT", &_clear_ct);
    print_summary("      Other", &_other);
  }
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("MISC");
  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  print_summary_sd("   Yields", &_all_yield_times_ms);
}

void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
  if (TraceGen1Time) {
    _all_full_gc_times.add(full_gc_time_ms);
  }
}

void TraceGen1TimeData::print() const {
  if (!TraceGen1Time) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                        _all_full_gc_times.num(),
                        _all_full_gc_times.sum() / 1000.0);
    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    gclog_or_tty->print_cr("     [std. dev = %8.2f ms, max = %8.2f ms]",
                           _all_full_gc_times.sd(),
                           _all_full_gc_times.maximum());
  }
}