/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ?
                        ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
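  // For example, if neither flag is set on the command line, the code
  // above leaves us with MaxGCPauseMillis == 200 and
  // GCPauseIntervalMillis == 201, which satisfies the
  // "pause time target < pause interval" invariant checked below.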
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  uintx confidence_perc = G1ConfidencePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (confidence_perc > 100) {
    confidence_perc = 100;
    warning("G1ConfidencePercent is set to a value that is too large, "
            "it's been updated to %u", confidence_perc);
  }
  _sigma = (double) confidence_perc / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max");
  assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds");
  assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
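  // (Example with made-up numbers: G1ReservePercent == 10, i.e. a
  // _reserve_factor of 0.10, and 2048 regions give ceil(204.8) == 205
  // reserve regions.)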
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                       uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length.
      // The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}



HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}


#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So, no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start();

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_gen0_time_data.record_yield_time(yield_ms);
  }
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, false);

  size_t freed_bytes =
    _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;

  double survival_fraction =
    (double)surviving_bytes/
    (double)_collection_set_bytes_used_before;

  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
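    // (Example with made-up numbers: 50 eden regions allocated over
    // 1000 ms of mutator time yields an allocation rate of 0.05
    // regions/ms for _alloc_rate_ms_seq below.)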
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif  // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }
  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (during_initial_mark_pause()) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t copied_bytes = surviving_bytes;
    double cost_per_byte_ms = 0.0;
    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                          (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                            (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                                   (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
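  // For example, a 200 ms pause target with G1RSetUpdatingPauseTimePercent
  // at 10 gives an update RS time goal of 0.2 * 1000 * 10 / 100 = 20 ms.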
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start() {
  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

  _cur_collection_pause_used_at_start_bytes = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();

  size_t eden_capacity_before_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;

  _prev_eden_capacity = eden_capacity_before_gc;
}

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
    _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
}

void G1CollectorPolicy::print_detailed_heap_transition() {
  YoungList* young_list = _g1->young_list();
  size_t eden_bytes = young_list->eden_used_bytes();
  size_t survivor_bytes = young_list->survivor_used_bytes();
  size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  size_t used = _g1->used();
  size_t capacity = _g1->capacity();
  size_t eden_capacity =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;

  gclog_or_tty->print_cr(
    "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
    "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
    "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
    EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
    EXT_SIZE_PARAMS(_eden_bytes_before_gc),
    EXT_SIZE_PARAMS(_prev_eden_capacity),
    EXT_SIZE_PARAMS(eden_bytes),
    EXT_SIZE_PARAMS(eden_capacity),
    EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_bytes),
    EXT_SIZE_PARAMS(used_before_gc),
    EXT_SIZE_PARAMS(_capacity_before_gc),
    EXT_SIZE_PARAMS(used),
    EXT_SIZE_PARAMS(capacity));
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
  else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool for_young_gc) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
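  // (i.e., a young region evacuated as part of a mixed collection still
  // gets the young "other" cost added below, because that cost is tied
  // to the region, not to the kind of pause.)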
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                            uint survivor_cset_region_length) {
  _eden_cset_region_length     = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length      = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

#ifndef PRODUCT
// for debugging, bit of a hack...
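// (Note: the helper below returns a pointer into a single static buffer,
// so the result is only valid until the next call and it is not
// thread-safe; that is acceptable for its debugging-only use.)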
static char*
region_num_to_mbs(int length) {
  static char buffer[64];
  double bytes = (double) (length * HeapRegion::GrainBytes);
  double mbs = bytes / (double) (1024 * 1024);
  sprintf(buffer, "%7.2lfMB", mbs);
  return buffer;
}
#endif // PRODUCT

uint G1CollectorPolicy::max_regions(int purpose) {
  switch (purpose) {
    case GCAllocForSurvived:
      return _max_survivor_regions;
    case GCAllocForTenured:
      return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED;
  };
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions);
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                     GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();

      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}

class KnownGarbageClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CollectionSetChooser* _hrSorted;

public:
  KnownGarbageClosure(CollectionSetChooser* hrSorted) :
    _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _hrSorted->add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool doHeapRegion(HeapRegion* r) {
    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _cset_updater.add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap()) { }

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);

    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                         _g1->workers()->active_workers(),
                                         HeapRegion::InitialClaimValue);
  }
};

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  _collectionSetChooser->clear();

  uint region_num = _g1->n_regions();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const uint OverpartitionFactor = 4;
    uint WorkUnit;
    // The use of MinChunkSize = 8 in the original code
    // causes some assertion failures when the total number of
    // regions is less than 8. The code here tries to fix that.
    // Should the original code also be fixed?
    if (no_of_gc_threads > 0) {
      const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
      WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
                      MinWorkUnit);
    } else {
      assert(no_of_gc_threads > 0,
             "The active gc workers should be greater than 0");
      // In a product build do something reasonable to avoid a crash.
      const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
      WorkUnit =
        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
    }
    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
                                                           WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }

  _collectionSetChooser->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(!hr->is_young(), "non-incremental add of young region");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_bytes_used_before += hr->used();
  _g1->register_region_with_in_cset_fast_test(hr);
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
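  // Account for this region in the old portion of the CSet length; the
  // eden and survivor portions are set separately via
  // init_cset_region_lengths().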
  _old_cset_region_length += 1;
}

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms = 0.0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_cset_build_state = Active;
}

void G1CollectorPolicy::finalize_incremental_cset_building() {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. In theory the diff should always be positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
    if (_inc_cset_recorded_rs_lengths >= diffs) {
      _inc_cset_recorded_rs_lengths -= diffs;
    } else {
      _inc_cset_recorded_rs_lengths = 0;
    }
  }
  _inc_cset_predicted_elapsed_time_ms +=
                                     _inc_cset_predicted_elapsed_time_ms_diffs;

  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}

void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).
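
  // Fold this region's predicted cost, RSet length and used bytes into
  // the incremental CSet totals.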
  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  size_t used_bytes = hr->used();
  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // RSet sampling code.
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}

void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(),
         "should not be at a safepoint");

  // We could have updated _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.
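
  // The region's current RSet size is used as its initial recorded
  // length; the young list RSet sampling code may later refine it via
  // update_incremental_cset_info().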
  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert(hr->next_in_collection_set() == NULL, "invariant");

  _g1->register_region_with_in_cset_fast_test(hr);
}

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(!hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}

#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
                 HR_FORMAT_PARAMS(csr),
                 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
                 csr->age_in_surv_rate_group_cond());
    csr = next;
  }
}
#endif // !PRODUCT

double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
  // Returns the given amount of reclaimable bytes (that represents
  // the amount of reclaimable space still to be collected) as a
  // percentage of the current heap capacity.
  size_t capacity_bytes = _g1->capacity();
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}

bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) {
  CollectionSetChooser* cset_chooser = _collectionSetChooser;
  if (cset_chooser->is_empty()) {
    ergo_verbose0(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("candidate old regions not available"));
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    ergo_verbose4(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("reclaimable percentage not over threshold")
                  ergo_format_region("candidate old regions")
                  ergo_format_byte_perc("reclaimable")
                  ergo_format_perc("threshold"),
                  cset_chooser->remaining_regions(),
                  reclaimable_bytes,
                  reclaimable_perc, threshold);
    return false;
  }

  ergo_verbose4(ErgoMixedGCs,
                true_action_str,
                ergo_format_reason("candidate old regions available")
                ergo_format_region("candidate old regions")
                ergo_format_byte_perc("reclaimable")
                ergo_format_perc("threshold"),
                cset_chooser->remaining_regions(),
                reclaimable_bytes,
                reclaimable_perc, threshold);
  return true;
}

uint G1CollectorPolicy::calc_min_old_cset_length() {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) _collectionSetChooser->length();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1CollectorPolicy::calc_max_old_cset_length() {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.
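  //
  // For example, with 2048 regions in the heap and a threshold of 10%,
  // at most ceil(2048 * 10 / 100) = 205 old regions are added to the CSet.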

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->n_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}

void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_size("_pending_cards")
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  _last_gc_was_young = gcs_are_young();

  if (_last_gc_was_young) {
    _trace_gen0_time_data.increment_young_collection_count();
  } else {
    _trace_gen0_time_data.increment_mixed_collection_count();
  }

  // The young list is laid out with the survivor regions from the
  // previous pause appended to its RHS, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  // Set the start of the non-young choice time.
  double non_young_start_time_sec = young_end_time_sec;

  if (!gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    cset_chooser->verify();
    const uint min_old_cset_length = calc_min_old_cset_length();
    const uint max_old_cset_length = calc_max_old_cset_length();

    uint expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();

    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        ergo_verbose5(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("reclaimable percentage not over threshold")
                      ergo_format_region("old")
                      ergo_format_region("max")
                      ergo_format_byte_perc("reclaimable")
                      ergo_format_perc("threshold"),
                      old_cset_region_length(),
                      max_old_cset_length,
                      reclaimable_bytes,
                      reclaimable_perc, threshold);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_pause_time_ms += predicted_time_ms;
      cset_chooser->remove_and_move_to_next(hr);
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    cset_chooser->verify();
  }

  stop_incremental_cset_building();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
  evacuation_info.set_collectionset_regions(cset_region_length());
}

void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
  if (TraceGen0Time) {
    _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
  }
}

void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
  if (TraceGen0Time) {
    _all_yield_times_ms.add(yield_time_ms);
  }
}

void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  if (TraceGen0Time) {
    _total.add(pause_time_ms);
    _other.add(pause_time_ms - phase_times->accounted_time_ms());
    _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
    _parallel.add(phase_times->cur_collection_par_time_ms());
    _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
    _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
    _update_rs.add(phase_times->average_last_update_rs_time());
    _scan_rs.add(phase_times->average_last_scan_rs_time());
    _obj_copy.add(phase_times->average_last_obj_copy_time());
    _termination.add(phase_times->average_last_termination_time());

    double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
      phase_times->average_last_satb_filtering_times_ms() +
      phase_times->average_last_update_rs_time() +
      phase_times->average_last_scan_rs_time() +
      phase_times->average_last_obj_copy_time() +
      phase_times->average_last_termination_time();

    double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
    _parallel_other.add(parallel_other_time);
    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  }
}

void
TraceGen0TimeData::increment_young_collection_count() {
  if (TraceGen0Time) {
    ++_young_pause_num;
  }
}

void TraceGen0TimeData::increment_mixed_collection_count() {
  if (TraceGen0Time) {
    ++_mixed_pause_num;
  }
}

void TraceGen0TimeData::print_summary(const char* str,
                                      const NumberSeq* seq) const {
  double sum = seq->sum();
  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
                         str, sum / 1000.0, seq->avg());
}

void TraceGen0TimeData::print_summary_sd(const char* str,
                                         const NumberSeq* seq) const {
  print_summary(str, seq);
  gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                         "(num", seq->num(), seq->sd(), seq->maximum());
}

void TraceGen0TimeData::print() const {
  if (!TraceGen0Time) {
    return;
  }

  gclog_or_tty->print_cr("ALL PAUSES");
  print_summary_sd("   Total", &_total);
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    gclog_or_tty->print_cr("none");
  } else {
    print_summary_sd("   Evacuation Pauses", &_total);
    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
    print_summary("      Parallel Time", &_parallel);
    print_summary("         Ext Root Scanning", &_ext_root_scan);
    print_summary("         SATB Filtering", &_satb_filtering);
    print_summary("         Update RS", &_update_rs);
    print_summary("         Scan RS", &_scan_rs);
    print_summary("         Object Copy", &_obj_copy);
    print_summary("         Termination", &_termination);
    print_summary("         Parallel Other", &_parallel_other);
    print_summary("      Clear CT", &_clear_ct);
    print_summary("      Other", &_other);
  }
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("MISC");
  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  print_summary_sd("   Yields", &_all_yield_times_ms);
}

void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
  if (TraceGen1Time) {
    _all_full_gc_times.add(full_gc_time_ms);
  }
}

void TraceGen1TimeData::print() const {
  if (!TraceGen1Time) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                        _all_full_gc_times.num(),
                        _all_full_gc_times.sum() / 1000.0);
    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    gclog_or_tty->print_cr("  [std. dev = %8.2f ms, max = %8.2f ms]",
                           _all_full_gc_times.sd(),
                           _all_full_gc_times.maximum());
  }
}