/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different numbers of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads() ?
                          ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_used_bytes_before_gc(0),
  _survivor_used_bytes_before_gc(0),
  _heap_used_bytes_before_gc(0),
  _metaspace_used_bytes_before_gc(0),
  _eden_capacity_bytes_before_gc(0),
  _heap_capacity_bytes_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                                  young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                                  non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  uintx confidence_perc = G1ConfidencePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (confidence_perc > 100) {
    confidence_perc = 100;
    warning("G1ConfidencePercent is set to a value that is too large, "
            "it's been updated to %u", confidence_perc);
  }
  _sigma = (double) confidence_perc / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags

}

void G1CollectorPolicy::initialize_flags() {
  _min_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
  size_t page_size = UseLargePages ?
                     os::large_page_size() : os::vm_page_size();
  _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max");
  assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds");
  assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      // Adaptive sizing only makes sense if the user-specified bounds
      // leave a range to adapt within.
      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                 (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate).
  // The base_min_length will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
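// G1 does not route allocations through this CollectorPolicy entry point;
// the guarantee below catches any accidental use.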
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                           mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_gen0_time_data.record_yield_time(yield_ms);
  }
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young() && !_last_young_gc) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, false);

  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the number of eden regions allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
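    // The allocation rate is therefore measured in eden regions per
    // millisecond of mutator time.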
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }

  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (during_initial_mark_pause()) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                          (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                            (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                                   (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
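  // Convert it to ms and allow remembered set updates to consume at most
  // G1RSetUpdatingPauseTimePercent of that pause time goal.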
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  YoungList* young_list = _g1->young_list();
  _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  _heap_capacity_bytes_before_gc = _g1->capacity();
  _heap_used_bytes_before_gc = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();

  _eden_capacity_bytes_before_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;

  if (full) {
    _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
  }
}

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
                             _heap_used_bytes_before_gc,
                             _g1->used(),
                             _g1->capacity());
}

void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
  YoungList* young_list = _g1->young_list();

  size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
  size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  size_t heap_used_bytes_after_gc = _g1->used();

  size_t heap_capacity_bytes_after_gc = _g1->capacity();
  size_t eden_capacity_bytes_after_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;

  gclog_or_tty->print(
    " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
    "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
    "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
    EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));

  if (full) {
    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  }

  gclog_or_tty->cr();
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
  else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool for_young_gc) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                            uint survivor_cset_region_length) {
  _eden_cset_region_length     = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length      = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

uint G1CollectorPolicy::max_regions(int purpose) {
  switch (purpose) {
    case GCAllocForSurvived:
      return _max_survivor_regions;
    case GCAllocForTenured:
      return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED;
  };
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions);
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                     GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();
      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one).
Starting a 1494 // cycle now will be bad given that parts of the marking 1495 // information might get cleared by the marking thread. And we 1496 // cannot wait for the marking thread to finish the cycle as it 1497 // periodically yields while clearing the next marking bitmap 1498 // and, if it's in a yield point, it's waiting for us to 1499 // finish. So, at this point we will not start a cycle and we'll 1500 // let the concurrent marking thread complete the last one. 1501 ergo_verbose0(ErgoConcCycles, 1502 "do not initiate concurrent cycle", 1503 ergo_format_reason("concurrent cycle already in progress")); 1504 } 1505 } 1506 } 1507 1508 class KnownGarbageClosure: public HeapRegionClosure { 1509 G1CollectedHeap* _g1h; 1510 CollectionSetChooser* _hrSorted; 1511 1512 public: 1513 KnownGarbageClosure(CollectionSetChooser* hrSorted) : 1514 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { } 1515 1516 bool doHeapRegion(HeapRegion* r) { 1517 // We only include humongous regions in collection 1518 // sets when concurrent mark shows that their contained object is 1519 // unreachable. 1520 1521 // Do we have any marking information for this region? 1522 if (r->is_marked()) { 1523 // We will skip any region that's currently used as an old GC 1524 // alloc region (we should not consider those for collection 1525 // before we fill them up). 1526 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1527 _hrSorted->add_region(r); 1528 } 1529 } 1530 return false; 1531 } 1532 }; 1533 1534 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1535 G1CollectedHeap* _g1h; 1536 CSetChooserParUpdater _cset_updater; 1537 1538 public: 1539 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1540 uint chunk_size) : 1541 _g1h(G1CollectedHeap::heap()), 1542 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1543 1544 bool doHeapRegion(HeapRegion* r) { 1545 // Do we have any marking information for this region? 1546 if (r->is_marked()) { 1547 // We will skip any region that's currently used as an old GC 1548 // alloc region (we should not consider those for collection 1549 // before we fill them up). 1550 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1551 _cset_updater.add_region(r); 1552 } 1553 } 1554 return false; 1555 } 1556 }; 1557 1558 class ParKnownGarbageTask: public AbstractGangTask { 1559 CollectionSetChooser* _hrSorted; 1560 uint _chunk_size; 1561 G1CollectedHeap* _g1; 1562 public: 1563 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) : 1564 AbstractGangTask("ParKnownGarbageTask"), 1565 _hrSorted(hrSorted), _chunk_size(chunk_size), 1566 _g1(G1CollectedHeap::heap()) { } 1567 1568 void work(uint worker_id) { 1569 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1570 1571 // Back to zero for the claim value. 1572 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id, 1573 _g1->workers()->active_workers(), 1574 HeapRegion::InitialClaimValue); 1575 } 1576 }; 1577 1578 void 1579 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { 1580 _collectionSetChooser->clear(); 1581 1582 uint region_num = _g1->n_regions(); 1583 if (G1CollectedHeap::use_parallel_gc_threads()) { 1584 const uint OverpartitionFactor = 4; 1585 uint WorkUnit; 1586 // The use of MinChunkSize = 8 in the original code 1587 // causes some assertion failures when the total number of 1588 // region is less than 8. The code here tries to fix that. 1589 // Should the original code also be fixed? 
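// For illustration (numbers assumed): with 2048 regions and 8 active GC threads, MinWorkUnit is 2048 / 8 = 256 while the over-partitioned chunk size is 2048 / (8 * 4) = 64, so WorkUnit becomes MAX2(64, 256) = 256 regions per claimed chunk.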
1590 if (no_of_gc_threads > 0) { 1591 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U); 1592 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor), 1593 MinWorkUnit); 1594 } else { 1595 assert(no_of_gc_threads > 0, 1596 "The active gc workers should be greater than 0"); 1597 // In a product build do something reasonable to avoid a crash. 1598 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U); 1599 WorkUnit = 1600 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), 1601 MinWorkUnit); 1602 } 1603 _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(), 1604 WorkUnit); 1605 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, 1606 (int) WorkUnit); 1607 _g1->workers()->run_task(&parKnownGarbageTask); 1608 1609 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 1610 "sanity check"); 1611 } else { 1612 KnownGarbageClosure knownGarbagecl(_collectionSetChooser); 1613 _g1->heap_region_iterate(&knownGarbagecl); 1614 } 1615 1616 _collectionSetChooser->sort_regions(); 1617 1618 double end_sec = os::elapsedTime(); 1619 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1620 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1621 _cur_mark_stop_world_time_ms += elapsed_time_ms; 1622 _prev_collection_pause_end_ms += elapsed_time_ms; 1623 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true); 1624 } 1625 1626 // Add the heap region at the head of the non-incremental collection set 1627 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) { 1628 assert(_inc_cset_build_state == Active, "Precondition"); 1629 assert(!hr->is_young(), "non-incremental add of young region"); 1630 1631 assert(!hr->in_collection_set(), "should not already be in the CSet"); 1632 hr->set_in_collection_set(true); 1633 hr->set_next_in_collection_set(_collection_set); 1634 _collection_set = hr; 1635 _collection_set_bytes_used_before += hr->used(); 1636 _g1->register_region_with_in_cset_fast_test(hr); 1637 size_t rs_length = hr->rem_set()->occupied(); 1638 _recorded_rs_lengths += rs_length; 1639 _old_cset_region_length += 1; 1640 } 1641 1642 // Initialize the per-collection-set information 1643 void G1CollectorPolicy::start_incremental_cset_building() { 1644 assert(_inc_cset_build_state == Inactive, "Precondition"); 1645 1646 _inc_cset_head = NULL; 1647 _inc_cset_tail = NULL; 1648 _inc_cset_bytes_used_before = 0; 1649 1650 _inc_cset_max_finger = 0; 1651 _inc_cset_recorded_rs_lengths = 0; 1652 _inc_cset_recorded_rs_lengths_diffs = 0; 1653 _inc_cset_predicted_elapsed_time_ms = 0.0; 1654 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1655 _inc_cset_build_state = Active; 1656 } 1657 1658 void G1CollectorPolicy::finalize_incremental_cset_building() { 1659 assert(_inc_cset_build_state == Active, "Precondition"); 1660 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1661 1662 // The two "main" fields, _inc_cset_recorded_rs_lengths and 1663 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread 1664 // that adds a new region to the CSet. Further updates by the 1665 // concurrent refinement thread that samples the young RSet lengths 1666 // are accumulated in the *_diffs fields. Here we add the diffs to 1667 // the "main" fields. 1668 1669 if (_inc_cset_recorded_rs_lengths_diffs >= 0) { 1670 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs; 1671 } else { 1672 // This is defensive. 
The diff should, in theory, always be positive 1673 // as RSets can only grow between GCs. However, given that we 1674 // sample their size concurrently with other threads updating them 1675 // it's possible that we might get the wrong size back, which 1676 // could make the calculations somewhat inaccurate. 1677 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs); 1678 if (_inc_cset_recorded_rs_lengths >= diffs) { 1679 _inc_cset_recorded_rs_lengths -= diffs; 1680 } else { 1681 _inc_cset_recorded_rs_lengths = 0; 1682 } 1683 } 1684 _inc_cset_predicted_elapsed_time_ms += 1685 _inc_cset_predicted_elapsed_time_ms_diffs; 1686 1687 _inc_cset_recorded_rs_lengths_diffs = 0; 1688 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1689 } 1690 1691 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { 1692 // This routine is used when: 1693 // * adding survivor regions to the incremental cset at the end of an 1694 // evacuation pause, 1695 // * adding the current allocation region to the incremental cset 1696 // when it is retired, and 1697 // * updating existing policy information for a region in the 1698 // incremental cset via young list RSet sampling. 1699 // Therefore this routine may be called at a safepoint by the 1700 // VM thread, or in-between safepoints by mutator threads (when 1701 // retiring the current allocation region) or a concurrent 1702 // refine thread (RSet sampling). 1703 1704 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 1705 size_t used_bytes = hr->used(); 1706 _inc_cset_recorded_rs_lengths += rs_length; 1707 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; 1708 _inc_cset_bytes_used_before += used_bytes; 1709 1710 // Cache the values we have added to the aggregated information 1711 // in the heap region in case we have to remove this region from 1712 // the incremental collection set, or it is updated by the 1713 // RSet sampling code. 1714 hr->set_recorded_rs_length(rs_length); 1715 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); 1716 } 1717 1718 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, 1719 size_t new_rs_length) { 1720 // Update the CSet information that is dependent on the new RS length. 1721 assert(hr->is_young(), "Precondition"); 1722 assert(!SafepointSynchronize::is_at_safepoint(), 1723 "should not be at a safepoint"); 1724 1725 // We could have updated _inc_cset_recorded_rs_lengths and 1726 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do 1727 // that atomically, as this code is executed by a concurrent 1728 // refinement thread, potentially concurrently with a mutator thread 1729 // allocating a new region and also updating the same fields. To 1730 // avoid the atomic operations we accumulate these updates on two 1731 // separate fields (*_diffs) and we'll just add them to the "main" 1732 // fields at the start of a GC.
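// For illustration (sample numbers assumed): if the refinement thread observes a region's RSet grow from 10 to 14 entries, the code below records a diff of +4; finalize_incremental_cset_building() later folds that +4 into _inc_cset_recorded_rs_lengths at the start of the next pause.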
1733 1734 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length(); 1735 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length; 1736 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff; 1737 1738 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); 1739 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 1740 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms; 1741 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff; 1742 1743 hr->set_recorded_rs_length(new_rs_length); 1744 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms); 1745 } 1746 1747 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { 1748 assert(hr->is_young(), "invariant"); 1749 assert(hr->young_index_in_cset() > -1, "should have already been set"); 1750 assert(_inc_cset_build_state == Active, "Precondition"); 1751 1752 // We need to clear and set the cached recorded/cached collection set 1753 // information in the heap region here (before the region gets added 1754 // to the collection set). An individual heap region's cached values 1755 // are calculated, aggregated with the policy collection set info, 1756 // and cached in the heap region here (initially) and (subsequently) 1757 // by the Young List sampling code. 1758 1759 size_t rs_length = hr->rem_set()->occupied(); 1760 add_to_incremental_cset_info(hr, rs_length); 1761 1762 HeapWord* hr_end = hr->end(); 1763 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end); 1764 1765 assert(!hr->in_collection_set(), "invariant"); 1766 hr->set_in_collection_set(true); 1767 assert( hr->next_in_collection_set() == NULL, "invariant"); 1768 1769 _g1->register_region_with_in_cset_fast_test(hr); 1770 } 1771 1772 // Add the region at the RHS of the incremental cset 1773 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { 1774 // We should only ever be appending survivors at the end of a pause 1775 assert( hr->is_survivor(), "Logic"); 1776 1777 // Do the 'common' stuff 1778 add_region_to_incremental_cset_common(hr); 1779 1780 // Now add the region at the right hand side 1781 if (_inc_cset_tail == NULL) { 1782 assert(_inc_cset_head == NULL, "invariant"); 1783 _inc_cset_head = hr; 1784 } else { 1785 _inc_cset_tail->set_next_in_collection_set(hr); 1786 } 1787 _inc_cset_tail = hr; 1788 } 1789 1790 // Add the region to the LHS of the incremental cset 1791 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { 1792 // Survivors should be added to the RHS at the end of a pause 1793 assert(!hr->is_survivor(), "Logic"); 1794 1795 // Do the 'common' stuff 1796 add_region_to_incremental_cset_common(hr); 1797 1798 // Add the region at the left hand side 1799 hr->set_next_in_collection_set(_inc_cset_head); 1800 if (_inc_cset_head == NULL) { 1801 assert(_inc_cset_tail == NULL, "Invariant"); 1802 _inc_cset_tail = hr; 1803 } 1804 _inc_cset_head = hr; 1805 } 1806 1807 #ifndef PRODUCT 1808 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { 1809 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); 1810 1811 st->print_cr("\nCollection_set:"); 1812 HeapRegion* csr = list_head; 1813 while (csr != NULL) { 1814 HeapRegion* next = csr->next_in_collection_set(); 1815 assert(csr->in_collection_set(), "bad CS"); 1816 st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d", 1817 HR_FORMAT_PARAMS(csr), 1818 csr->prev_top_at_mark_start(), 
csr->next_top_at_mark_start(), 1819 csr->age_in_surv_rate_group_cond()); 1820 csr = next; 1821 } 1822 } 1823 #endif // !PRODUCT 1824 1825 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) { 1826 // Returns the given amount of reclaimable bytes (that represents 1827 // the amount of reclaimable space still to be collected) as a 1828 // percentage of the current heap capacity. 1829 size_t capacity_bytes = _g1->capacity(); 1830 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 1831 } 1832 1833 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 1834 const char* false_action_str) { 1835 CollectionSetChooser* cset_chooser = _collectionSetChooser; 1836 if (cset_chooser->is_empty()) { 1837 ergo_verbose0(ErgoMixedGCs, 1838 false_action_str, 1839 ergo_format_reason("candidate old regions not available")); 1840 return false; 1841 } 1842 1843 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 1844 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 1845 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 1846 double threshold = (double) G1HeapWastePercent; 1847 if (reclaimable_perc <= threshold) { 1848 ergo_verbose4(ErgoMixedGCs, 1849 false_action_str, 1850 ergo_format_reason("reclaimable percentage not over threshold") 1851 ergo_format_region("candidate old regions") 1852 ergo_format_byte_perc("reclaimable") 1853 ergo_format_perc("threshold"), 1854 cset_chooser->remaining_regions(), 1855 reclaimable_bytes, 1856 reclaimable_perc, threshold); 1857 return false; 1858 } 1859 1860 ergo_verbose4(ErgoMixedGCs, 1861 true_action_str, 1862 ergo_format_reason("candidate old regions available") 1863 ergo_format_region("candidate old regions") 1864 ergo_format_byte_perc("reclaimable") 1865 ergo_format_perc("threshold"), 1866 cset_chooser->remaining_regions(), 1867 reclaimable_bytes, 1868 reclaimable_perc, threshold); 1869 return true; 1870 } 1871 1872 uint G1CollectorPolicy::calc_min_old_cset_length() { 1873 // The min old CSet region bound is based on the maximum desired 1874 // number of mixed GCs after a cycle. I.e., even if some old regions 1875 // look expensive, we should add them to the CSet anyway to make 1876 // sure we go through the available old regions in no more than the 1877 // maximum desired number of mixed GCs. 1878 // 1879 // The calculation is based on the number of marked regions we added 1880 // to the CSet chooser in the first place, not how many remain, so 1881 // that the result is the same during all mixed GCs that follow a cycle. 1882 1883 const size_t region_num = (size_t) _collectionSetChooser->length(); 1884 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 1885 size_t result = region_num / gc_num; 1886 // emulate ceiling 1887 if (result * gc_num < region_num) { 1888 result += 1; 1889 } 1890 return (uint) result; 1891 } 1892 1893 uint G1CollectorPolicy::calc_max_old_cset_length() { 1894 // The max old CSet region bound is based on the threshold expressed 1895 // as a percentage of the heap size. I.e., it should bound the 1896 // number of old regions added to the CSet irrespective of how many 1897 // of them are available. 
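// For illustration (numbers assumed): with 1024 regions and G1OldCSetRegionThresholdPercent at, say, 10, result starts as 1024 * 10 / 100 = 102; since 100 * 102 < 1024 * 10, the ceiling step bumps it to 103, i.e. at most 103 old regions per CSet.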
1898 1899 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1900 const size_t region_num = g1h->n_regions(); 1901 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 1902 size_t result = region_num * perc / 100; 1903 // emulate ceiling 1904 if (100 * result < region_num * perc) { 1905 result += 1; 1906 } 1907 return (uint) result; 1908 } 1909 1910 1911 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) { 1912 double young_start_time_sec = os::elapsedTime(); 1913 1914 YoungList* young_list = _g1->young_list(); 1915 finalize_incremental_cset_building(); 1916 1917 guarantee(target_pause_time_ms > 0.0, 1918 err_msg("target_pause_time_ms = %1.6lf should be positive", 1919 target_pause_time_ms)); 1920 guarantee(_collection_set == NULL, "Precondition"); 1921 1922 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); 1923 double predicted_pause_time_ms = base_time_ms; 1924 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); 1925 1926 ergo_verbose4(ErgoCSetConstruction | ErgoHigh, 1927 "start choosing CSet", 1928 ergo_format_size("_pending_cards") 1929 ergo_format_ms("predicted base time") 1930 ergo_format_ms("remaining time") 1931 ergo_format_ms("target pause time"), 1932 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms); 1933 1934 _last_gc_was_young = gcs_are_young(); 1935 1936 if (_last_gc_was_young) { 1937 _trace_gen0_time_data.increment_young_collection_count(); 1938 } else { 1939 _trace_gen0_time_data.increment_mixed_collection_count(); 1940 } 1941 1942 // The young list is laid out with the survivor regions from the previous 1943 // pause appended to the RHS of the young list, i.e. 1944 // [Newly Young Regions ++ Survivors from last pause]. 1945 1946 uint survivor_region_length = young_list->survivor_length(); 1947 uint eden_region_length = young_list->length() - survivor_region_length; 1948 init_cset_region_lengths(eden_region_length, survivor_region_length); 1949 1950 HeapRegion* hr = young_list->first_survivor_region(); 1951 while (hr != NULL) { 1952 assert(hr->is_survivor(), "badly formed young list"); 1953 hr->set_young(); 1954 hr = hr->get_next_young_region(); 1955 } 1956 1957 // Clear the fields that point to the survivor list - they are all young now. 1958 young_list->clear_survivors(); 1959 1960 _collection_set = _inc_cset_head; 1961 _collection_set_bytes_used_before = _inc_cset_bytes_used_before; 1962 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0); 1963 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms; 1964 1965 ergo_verbose3(ErgoCSetConstruction | ErgoHigh, 1966 "add young regions to CSet", 1967 ergo_format_region("eden") 1968 ergo_format_region("survivors") 1969 ergo_format_ms("predicted young region time"), 1970 eden_region_length, survivor_region_length, 1971 _inc_cset_predicted_elapsed_time_ms); 1972 1973 // Record the RS lengths accumulated for the regions currently in the 1974 // incremental collection set. 1975 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); 1976 1977 double young_end_time_sec = os::elapsedTime(); 1978 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0); 1979 1980 // Set the start of the non-young choice time.
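// If this is a mixed GC, the loop below extends the CSet with old regions from the CSet chooser until it hits the max old CSet length, the reclaimable-space threshold (G1HeapWastePercent), or, once the minimum old CSet length has been reached, the remaining pause time budget.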
1981 double non_young_start_time_sec = young_end_time_sec; 1982 1983 if (!gcs_are_young()) { 1984 CollectionSetChooser* cset_chooser = _collectionSetChooser; 1985 cset_chooser->verify(); 1986 const uint min_old_cset_length = calc_min_old_cset_length(); 1987 const uint max_old_cset_length = calc_max_old_cset_length(); 1988 1989 uint expensive_region_num = 0; 1990 bool check_time_remaining = adaptive_young_list_length(); 1991 1992 HeapRegion* hr = cset_chooser->peek(); 1993 while (hr != NULL) { 1994 if (old_cset_region_length() >= max_old_cset_length) { 1995 // Added maximum number of old regions to the CSet. 1996 ergo_verbose2(ErgoCSetConstruction, 1997 "finish adding old regions to CSet", 1998 ergo_format_reason("old CSet region num reached max") 1999 ergo_format_region("old") 2000 ergo_format_region("max"), 2001 old_cset_region_length(), max_old_cset_length); 2002 break; 2003 } 2004 2005 2006 // Stop adding regions if the remaining reclaimable space is 2007 // not above G1HeapWastePercent. 2008 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 2009 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 2010 double threshold = (double) G1HeapWastePercent; 2011 if (reclaimable_perc <= threshold) { 2012 // We've added enough old regions that the amount of uncollected 2013 // reclaimable space is at or below the waste threshold. Stop 2014 // adding old regions to the CSet. 2015 ergo_verbose5(ErgoCSetConstruction, 2016 "finish adding old regions to CSet", 2017 ergo_format_reason("reclaimable percentage not over threshold") 2018 ergo_format_region("old") 2019 ergo_format_region("max") 2020 ergo_format_byte_perc("reclaimable") 2021 ergo_format_perc("threshold"), 2022 old_cset_region_length(), 2023 max_old_cset_length, 2024 reclaimable_bytes, 2025 reclaimable_perc, threshold); 2026 break; 2027 } 2028 2029 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 2030 if (check_time_remaining) { 2031 if (predicted_time_ms > time_remaining_ms) { 2032 // Too expensive for the current CSet. 2033 2034 if (old_cset_region_length() >= min_old_cset_length) { 2035 // We have added the minimum number of old regions to the CSet, 2036 // we are done with this CSet. 2037 ergo_verbose4(ErgoCSetConstruction, 2038 "finish adding old regions to CSet", 2039 ergo_format_reason("predicted time is too high") 2040 ergo_format_ms("predicted time") 2041 ergo_format_ms("remaining time") 2042 ergo_format_region("old") 2043 ergo_format_region("min"), 2044 predicted_time_ms, time_remaining_ms, 2045 old_cset_region_length(), min_old_cset_length); 2046 break; 2047 } 2048 2049 // We'll add it anyway given that we haven't reached the 2050 // minimum number of old regions. 2051 expensive_region_num += 1; 2052 } 2053 } else { 2054 if (old_cset_region_length() >= min_old_cset_length) { 2055 // In the non-auto-tuning case, we'll finish adding regions 2056 // to the CSet if we reach the minimum. 2057 ergo_verbose2(ErgoCSetConstruction, 2058 "finish adding old regions to CSet", 2059 ergo_format_reason("old CSet region num reached min") 2060 ergo_format_region("old") 2061 ergo_format_region("min"), 2062 old_cset_region_length(), min_old_cset_length); 2063 break; 2064 } 2065 } 2066 2067 // We will add this region to the CSet. 
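// (Below we charge the region's predicted cost against the remaining pause time, remove it from the CSet chooser and the old region set, add it to the collection set, and then peek at the next candidate.)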
2068 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); 2069 predicted_pause_time_ms += predicted_time_ms; 2070 cset_chooser->remove_and_move_to_next(hr); 2071 _g1->old_set_remove(hr); 2072 add_old_region_to_cset(hr); 2073 2074 hr = cset_chooser->peek(); 2075 } 2076 if (hr == NULL) { 2077 ergo_verbose0(ErgoCSetConstruction, 2078 "finish adding old regions to CSet", 2079 ergo_format_reason("candidate old regions not available")); 2080 } 2081 2082 if (expensive_region_num > 0) { 2083 // We print the information once here at the end, predicated on 2084 // whether we added any apparently expensive regions or not, to 2085 // avoid generating output per region. 2086 ergo_verbose4(ErgoCSetConstruction, 2087 "added expensive regions to CSet", 2088 ergo_format_reason("old CSet region num not reached min") 2089 ergo_format_region("old") 2090 ergo_format_region("expensive") 2091 ergo_format_region("min") 2092 ergo_format_ms("remaining time"), 2093 old_cset_region_length(), 2094 expensive_region_num, 2095 min_old_cset_length, 2096 time_remaining_ms); 2097 } 2098 2099 cset_chooser->verify(); 2100 } 2101 2102 stop_incremental_cset_building(); 2103 2104 ergo_verbose5(ErgoCSetConstruction, 2105 "finish choosing CSet", 2106 ergo_format_region("eden") 2107 ergo_format_region("survivors") 2108 ergo_format_region("old") 2109 ergo_format_ms("predicted pause time") 2110 ergo_format_ms("target pause time"), 2111 eden_region_length, survivor_region_length, 2112 old_cset_region_length(), 2113 predicted_pause_time_ms, target_pause_time_ms); 2114 2115 double non_young_end_time_sec = os::elapsedTime(); 2116 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); 2117 evacuation_info.set_collectionset_regions(cset_region_length()); 2118 } 2119 2120 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) { 2121 if(TraceGen0Time) { 2122 _all_stop_world_times_ms.add(time_to_stop_the_world_ms); 2123 } 2124 } 2125 2126 void TraceGen0TimeData::record_yield_time(double yield_time_ms) { 2127 if(TraceGen0Time) { 2128 _all_yield_times_ms.add(yield_time_ms); 2129 } 2130 } 2131 2132 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) { 2133 if(TraceGen0Time) { 2134 _total.add(pause_time_ms); 2135 _other.add(pause_time_ms - phase_times->accounted_time_ms()); 2136 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms()); 2137 _parallel.add(phase_times->cur_collection_par_time_ms()); 2138 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time()); 2139 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms()); 2140 _update_rs.add(phase_times->average_last_update_rs_time()); 2141 _scan_rs.add(phase_times->average_last_scan_rs_time()); 2142 _obj_copy.add(phase_times->average_last_obj_copy_time()); 2143 _termination.add(phase_times->average_last_termination_time()); 2144 2145 double parallel_known_time = phase_times->average_last_ext_root_scan_time() + 2146 phase_times->average_last_satb_filtering_times_ms() + 2147 phase_times->average_last_update_rs_time() + 2148 phase_times->average_last_scan_rs_time() + 2149 phase_times->average_last_obj_copy_time() + 2150 + phase_times->average_last_termination_time(); 2151 2152 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time; 2153 _parallel_other.add(parallel_other_time); 2154 _clear_ct.add(phase_times->cur_clear_ct_time_ms()); 2155 } 2156 } 2157 2158 void 
TraceGen0TimeData::increment_young_collection_count() { 2159 if(TraceGen0Time) { 2160 ++_young_pause_num; 2161 } 2162 } 2163 2164 void TraceGen0TimeData::increment_mixed_collection_count() { 2165 if(TraceGen0Time) { 2166 ++_mixed_pause_num; 2167 } 2168 } 2169 2170 void TraceGen0TimeData::print_summary(const char* str, 2171 const NumberSeq* seq) const { 2172 double sum = seq->sum(); 2173 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)", 2174 str, sum / 1000.0, seq->avg()); 2175 } 2176 2177 void TraceGen0TimeData::print_summary_sd(const char* str, 2178 const NumberSeq* seq) const { 2179 print_summary(str, seq); 2180 gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", 2181 "(num", seq->num(), seq->sd(), seq->maximum()); 2182 } 2183 2184 void TraceGen0TimeData::print() const { 2185 if (!TraceGen0Time) { 2186 return; 2187 } 2188 2189 gclog_or_tty->print_cr("ALL PAUSES"); 2190 print_summary_sd(" Total", &_total); 2191 gclog_or_tty->print_cr(""); 2192 gclog_or_tty->print_cr(""); 2193 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num); 2194 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num); 2195 gclog_or_tty->print_cr(""); 2196 2197 gclog_or_tty->print_cr("EVACUATION PAUSES"); 2198 2199 if (_young_pause_num == 0 && _mixed_pause_num == 0) { 2200 gclog_or_tty->print_cr("none"); 2201 } else { 2202 print_summary_sd(" Evacuation Pauses", &_total); 2203 print_summary(" Root Region Scan Wait", &_root_region_scan_wait); 2204 print_summary(" Parallel Time", &_parallel); 2205 print_summary(" Ext Root Scanning", &_ext_root_scan); 2206 print_summary(" SATB Filtering", &_satb_filtering); 2207 print_summary(" Update RS", &_update_rs); 2208 print_summary(" Scan RS", &_scan_rs); 2209 print_summary(" Object Copy", &_obj_copy); 2210 print_summary(" Termination", &_termination); 2211 print_summary(" Parallel Other", &_parallel_other); 2212 print_summary(" Clear CT", &_clear_ct); 2213 print_summary(" Other", &_other); 2214 } 2215 gclog_or_tty->print_cr(""); 2216 2217 gclog_or_tty->print_cr("MISC"); 2218 print_summary_sd(" Stop World", &_all_stop_world_times_ms); 2219 print_summary_sd(" Yields", &_all_yield_times_ms); 2220 } 2221 2222 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) { 2223 if (TraceGen1Time) { 2224 _all_full_gc_times.add(full_gc_time_ms); 2225 } 2226 } 2227 2228 void TraceGen1TimeData::print() const { 2229 if (!TraceGen1Time) { 2230 return; 2231 } 2232 2233 if (_all_full_gc_times.num() > 0) { 2234 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s", 2235 _all_full_gc_times.num(), 2236 _all_full_gc_times.sum() / 1000.0); 2237 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg()); 2238 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]", 2239 _all_full_gc_times.sd(), 2240 _all_full_gc_times.maximum()); 2241 } 2242 }