1 /* 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/g1/concurrentG1Refine.hpp" 27 #include "gc_implementation/g1/concurrentMark.hpp" 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 31 #include "gc_implementation/g1/heapRegionRemSet.hpp" 32 #include "gc_implementation/shared/gcPolicyCounters.hpp" 33 #include "runtime/arguments.hpp" 34 #include "runtime/java.hpp" 35 #include "runtime/mutexLocker.hpp" 36 #include "utilities/debug.hpp" 37 38 #define PREDICTIONS_VERBOSE 0 39 40 // <NEW PREDICTION> 41 42 // Different defaults for different number of GC threads 43 // They were chosen by running GCOld and SPECjbb on debris with different 44 // numbers of GC threads and choosing them based on the results 45 46 // all the same 47 static double rs_length_diff_defaults[] = { 48 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 49 }; 50 51 static double cost_per_card_ms_defaults[] = { 52 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 53 }; 54 55 // all the same 56 static double fully_young_cards_per_entry_ratio_defaults[] = { 57 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 58 }; 59 60 static double cost_per_entry_ms_defaults[] = { 61 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 62 }; 63 64 static double cost_per_byte_ms_defaults[] = { 65 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 66 }; 67 68 // these should be pretty consistent 69 static double constant_other_time_ms_defaults[] = { 70 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 71 }; 72 73 74 static double young_other_cost_per_region_ms_defaults[] = { 75 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 76 }; 77 78 static double non_young_other_cost_per_region_ms_defaults[] = { 79 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 80 }; 81 82 // </NEW PREDICTION> 83 84 // Help class for avoiding interleaved logging 85 class LineBuffer: public StackObj { 86 87 private: 88 static const int BUFFER_LEN = 1024; 89 static const int INDENT_CHARS = 3; 90 char _buffer[BUFFER_LEN]; 91 int _indent_level; 92 int _cur; 93 94 void vappend(const char* format, va_list ap) { 95 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap); 96 if (res != -1) { 97 _cur += res; 98 } else { 99 DEBUG_ONLY(warning("buffer too small in LineBuffer");) 100 _buffer[BUFFER_LEN -1] = 0; 101 _cur = BUFFER_LEN; // vsnprintf above should not add 
to _buffer if we are called again 102 } 103 } 104 105 public: 106 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) { 107 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) { 108 _buffer[_cur] = ' '; 109 } 110 } 111 112 #ifndef PRODUCT 113 ~LineBuffer() { 114 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?"); 115 } 116 #endif 117 118 void append(const char* format, ...) { 119 va_list ap; 120 va_start(ap, format); 121 vappend(format, ap); 122 va_end(ap); 123 } 124 125 void append_and_print_cr(const char* format, ...) { 126 va_list ap; 127 va_start(ap, format); 128 vappend(format, ap); 129 va_end(ap); 130 gclog_or_tty->print_cr("%s", _buffer); 131 _cur = _indent_level * INDENT_CHARS; 132 } 133 }; 134 135 G1CollectorPolicy::G1CollectorPolicy() : 136 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads() 137 ? ParallelGCThreads : 1), 138 139 _n_pauses(0), 140 _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 141 _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 142 _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)), 143 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 144 _all_pause_times_ms(new NumberSeq()), 145 _stop_world_start(0.0), 146 _all_stop_world_times_ms(new NumberSeq()), 147 _all_yield_times_ms(new NumberSeq()), 148 149 _all_mod_union_times_ms(new NumberSeq()), 150 151 _summary(new Summary()), 152 153 #ifndef PRODUCT 154 _cur_clear_ct_time_ms(0.0), 155 _min_clear_cc_time_ms(-1.0), 156 _max_clear_cc_time_ms(-1.0), 157 _cur_clear_cc_time_ms(0.0), 158 _cum_clear_cc_time_ms(0.0), 159 _num_cc_clears(0L), 160 #endif 161 162 _region_num_young(0), 163 _region_num_tenured(0), 164 _prev_region_num_young(0), 165 _prev_region_num_tenured(0), 166 167 _aux_num(10), 168 _all_aux_times_ms(new NumberSeq[_aux_num]), 169 _cur_aux_start_times_ms(new double[_aux_num]), 170 _cur_aux_times_ms(new double[_aux_num]), 171 _cur_aux_times_set(new bool[_aux_num]), 172 173 _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 174 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 175 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 176 177 // <NEW PREDICTION> 178 179 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 180 _prev_collection_pause_end_ms(0.0), 181 _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 182 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 183 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 184 _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 185 _partially_young_cards_per_entry_ratio_seq( 186 new TruncatedSeq(TruncatedSeqLength)), 187 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 188 _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 189 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 190 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), 191 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 192 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 193 _non_young_other_cost_per_region_ms_seq( 194 new TruncatedSeq(TruncatedSeqLength)), 195 196 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 197 _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 198 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), 199 200 
_pause_time_target_ms((double) MaxGCPauseMillis), 201 202 // </NEW PREDICTION> 203 204 _in_young_gc_mode(false), 205 _full_young_gcs(true), 206 _full_young_pause_num(0), 207 _partial_young_pause_num(0), 208 209 _during_marking(false), 210 _in_marking_window(false), 211 _in_marking_window_im(false), 212 213 _known_garbage_ratio(0.0), 214 _known_garbage_bytes(0), 215 216 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)), 217 218 _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)), 219 220 _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)), 221 _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)), 222 223 _recent_avg_pause_time_ratio(0.0), 224 _num_markings(0), 225 _n_marks(0), 226 _n_pauses_at_mark_end(0), 227 228 _all_full_gc_times_ms(new NumberSeq()), 229 230 // G1PausesBtwnConcMark defaults to -1 231 // so the hack is to do the cast QQQ FIXME 232 _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark), 233 _n_marks_since_last_pause(0), 234 _initiate_conc_mark_if_possible(false), 235 _during_initial_mark_pause(false), 236 _should_revert_to_full_young_gcs(false), 237 _last_full_young_gc(false), 238 239 _eden_bytes_before_gc(0), 240 _survivor_bytes_before_gc(0), 241 _capacity_before_gc(0), 242 243 _prev_collection_pause_used_at_end_bytes(0), 244 245 _collection_set(NULL), 246 _collection_set_size(0), 247 _collection_set_bytes_used_before(0), 248 249 // Incremental CSet attributes 250 _inc_cset_build_state(Inactive), 251 _inc_cset_head(NULL), 252 _inc_cset_tail(NULL), 253 _inc_cset_size(0), 254 _inc_cset_young_index(0), 255 _inc_cset_bytes_used_before(0), 256 _inc_cset_max_finger(NULL), 257 _inc_cset_recorded_young_bytes(0), 258 _inc_cset_recorded_rs_lengths(0), 259 _inc_cset_predicted_elapsed_time_ms(0.0), 260 _inc_cset_predicted_bytes_to_copy(0), 261 262 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away 263 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 264 #endif // _MSC_VER 265 266 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived", 267 G1YoungSurvRateNumRegionsSummary)), 268 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor", 269 G1YoungSurvRateNumRegionsSummary)), 270 // add here any more surv rate groups 271 _recorded_survivor_regions(0), 272 _recorded_survivor_head(NULL), 273 _recorded_survivor_tail(NULL), 274 _survivors_age_table(true), 275 276 _gc_overhead_perc(0.0) 277 278 { 279 // Set up the region size and associated fields. Given that the 280 // policy is created before the heap, we have to set this up here, 281 // so it's done as soon as possible. 282 HeapRegion::setup_heap_region_size(Arguments::min_heap_size()); 283 HeapRegionRemSet::setup_remset_size(); 284 285 // Verify PLAB sizes 286 const uint region_size = HeapRegion::GrainWords; 287 if (YoungPLABSize > region_size || OldPLABSize > region_size) { 288 char buffer[128]; 289 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u", 290 OldPLABSize > region_size ? 
"Old" : "Young", region_size); 291 vm_exit_during_initialization(buffer); 292 } 293 294 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 295 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 296 297 _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads]; 298 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads]; 299 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads]; 300 301 _par_last_update_rs_times_ms = new double[_parallel_gc_threads]; 302 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads]; 303 304 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads]; 305 306 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads]; 307 308 _par_last_termination_times_ms = new double[_parallel_gc_threads]; 309 _par_last_termination_attempts = new double[_parallel_gc_threads]; 310 _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads]; 311 _par_last_gc_worker_times_ms = new double[_parallel_gc_threads]; 312 313 // start conservatively 314 _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; 315 316 // <NEW PREDICTION> 317 318 int index; 319 if (ParallelGCThreads == 0) 320 index = 0; 321 else if (ParallelGCThreads > 8) 322 index = 7; 323 else 324 index = ParallelGCThreads - 1; 325 326 _pending_card_diff_seq->add(0.0); 327 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 328 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 329 _fully_young_cards_per_entry_ratio_seq->add( 330 fully_young_cards_per_entry_ratio_defaults[index]); 331 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 332 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 333 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 334 _young_other_cost_per_region_ms_seq->add( 335 young_other_cost_per_region_ms_defaults[index]); 336 _non_young_other_cost_per_region_ms_seq->add( 337 non_young_other_cost_per_region_ms_defaults[index]); 338 339 // </NEW PREDICTION> 340 341 // Below, we might need to calculate the pause time target based on 342 // the pause interval. When we do so we are going to give G1 maximum 343 // flexibility and allow it to do pauses when it needs to. So, we'll 344 // arrange that the pause interval to be pause time target + 1 to 345 // ensure that a) the pause time target is maximized with respect to 346 // the pause interval and b) we maintain the invariant that pause 347 // time target < pause interval. If the user does not want this 348 // maximum flexibility, they will have to set the pause interval 349 // explicitly. 350 351 // First make sure that, if either parameter is set, its value is 352 // reasonable. 353 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 354 if (MaxGCPauseMillis < 1) { 355 vm_exit_during_initialization("MaxGCPauseMillis should be " 356 "greater than 0"); 357 } 358 } 359 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 360 if (GCPauseIntervalMillis < 1) { 361 vm_exit_during_initialization("GCPauseIntervalMillis should be " 362 "greater than 0"); 363 } 364 } 365 366 // Then, if the pause time target parameter was not set, set it to 367 // the default value. 
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_init_times_ms->add(0.05);
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  // if G1FixedSurvivorSpaceSize is 0, which means the size is not
  // fixed, then _max_survivor_regions will be calculated at
  // calculate_young_list_target_length during initialization
  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  initialize_all();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
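// Illustrative note (hypothetical numbers, not taken from any measurement):
// assuming a 1 MB region size, an explicit young generation request of 64m
// would map to 64 young regions via size_to_region_num() below; the
// conversion uses integer division and is clamped so that any non-zero
// request still counts as at least one region.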
441 class G1YoungGenSizer : public TwoGenerationCollectorPolicy { 442 size_t size_to_region_num(size_t byte_size) { 443 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes); 444 } 445 446 public: 447 G1YoungGenSizer() { 448 initialize_flags(); 449 initialize_size_info(); 450 } 451 452 size_t min_young_region_num() { 453 return size_to_region_num(_min_gen0_size); 454 } 455 size_t initial_young_region_num() { 456 return size_to_region_num(_initial_gen0_size); 457 } 458 size_t max_young_region_num() { 459 return size_to_region_num(_max_gen0_size); 460 } 461 }; 462 463 void G1CollectorPolicy::init() { 464 // Set aside an initial future to_space. 465 _g1 = G1CollectedHeap::heap(); 466 467 assert(Heap_lock->owned_by_self(), "Locking discipline."); 468 469 initialize_gc_policy_counters(); 470 471 if (G1Gen) { 472 _in_young_gc_mode = true; 473 474 G1YoungGenSizer sizer; 475 size_t initial_region_num = sizer.initial_young_region_num(); 476 477 if (UseAdaptiveSizePolicy) { 478 set_adaptive_young_list_length(true); 479 _young_list_fixed_length = 0; 480 } else { 481 set_adaptive_young_list_length(false); 482 _young_list_fixed_length = initial_region_num; 483 } 484 _free_regions_at_end_of_collection = _g1->free_regions(); 485 calculate_young_list_min_length(); 486 guarantee( _young_list_min_length == 0, "invariant, not enough info" ); 487 calculate_young_list_target_length(); 488 } else { 489 _young_list_fixed_length = 0; 490 _in_young_gc_mode = false; 491 } 492 493 // We may immediately start allocating regions and placing them on the 494 // collection set list. Initialize the per-collection set info 495 start_incremental_cset_building(); 496 } 497 498 // Create the jstat counters for the policy. 499 void G1CollectorPolicy::initialize_gc_policy_counters() 500 { 501 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen); 502 } 503 504 void G1CollectorPolicy::calculate_young_list_min_length() { 505 _young_list_min_length = 0; 506 507 if (!adaptive_young_list_length()) 508 return; 509 510 if (_alloc_rate_ms_seq->num() > 3) { 511 double now_sec = os::elapsedTime(); 512 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 513 double alloc_rate_ms = predict_alloc_rate_ms(); 514 size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms); 515 size_t current_region_num = _g1->young_list()->length(); 516 _young_list_min_length = min_regions + current_region_num; 517 } 518 } 519 520 void G1CollectorPolicy::calculate_young_list_target_length() { 521 if (adaptive_young_list_length()) { 522 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq); 523 calculate_young_list_target_length(rs_lengths); 524 } else { 525 if (full_young_gcs()) 526 _young_list_target_length = _young_list_fixed_length; 527 else 528 _young_list_target_length = _young_list_fixed_length / 2; 529 } 530 531 // Make sure we allow the application to allocate at least one 532 // region before we need to do a collection again. 
533 size_t min_length = _g1->young_list()->length() + 1; 534 _young_list_target_length = MAX2(_young_list_target_length, min_length); 535 calculate_max_gc_locker_expansion(); 536 calculate_survivors_policy(); 537 } 538 539 void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) { 540 guarantee( adaptive_young_list_length(), "pre-condition" ); 541 guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" ); 542 543 double start_time_sec = os::elapsedTime(); 544 size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent); 545 min_reserve_perc = MIN2((size_t) 50, min_reserve_perc); 546 size_t reserve_regions = 547 (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0); 548 549 if (full_young_gcs() && _free_regions_at_end_of_collection > 0) { 550 // we are in fully-young mode and there are free regions in the heap 551 552 double survivor_regions_evac_time = 553 predict_survivor_regions_evac_time(); 554 555 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 556 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); 557 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); 558 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); 559 double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards) 560 + survivor_regions_evac_time; 561 562 // the result 563 size_t final_young_length = 0; 564 565 size_t init_free_regions = 566 MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions); 567 568 // if we're still under the pause target... 569 if (base_time_ms <= target_pause_time_ms) { 570 // We make sure that the shortest young length that makes sense 571 // fits within the target pause time. 572 size_t min_young_length = 1; 573 574 if (predict_will_fit(min_young_length, base_time_ms, 575 init_free_regions, target_pause_time_ms)) { 576 // The shortest young length will fit within the target pause time; 577 // we'll now check whether the absolute maximum number of young 578 // regions will fit in the target pause time. If not, we'll do 579 // a binary search between min_young_length and max_young_length 580 size_t abs_max_young_length = _free_regions_at_end_of_collection - 1; 581 size_t max_young_length = abs_max_young_length; 582 583 if (max_young_length > min_young_length) { 584 // Let's check if the initial max young length will fit within the 585 // target pause. If so then there is no need to search for a maximal 586 // young length - we'll return the initial maximum 587 588 if (predict_will_fit(max_young_length, base_time_ms, 589 init_free_regions, target_pause_time_ms)) { 590 // The maximum young length will satisfy the target pause time. 591 // We are done so set min young length to this maximum length. 592 // The code after the loop will then set final_young_length using 593 // the value cached in the minimum length. 594 min_young_length = max_young_length; 595 } else { 596 // The maximum possible number of young regions will not fit within 597 // the target pause time so let's search.... 598 599 size_t diff = (max_young_length - min_young_length) / 2; 600 max_young_length = min_young_length + diff; 601 602 while (max_young_length > min_young_length) { 603 if (predict_will_fit(max_young_length, base_time_ms, 604 init_free_regions, target_pause_time_ms)) { 605 606 // The current max young length will fit within the target 607 // pause time. Note we do not exit the loop here. 
                // By setting min = max and then increasing max below, we
                // will continue searching for an upper bound in the
                // range [max..max+diff]
                min_young_length = max_young_length;
              }
              diff = (max_young_length - min_young_length) / 2;
              max_young_length = min_young_length + diff;
            }
            // the above loop found a maximal young length that will fit
            // within the target pause time.
          }
          assert(min_young_length <= abs_max_young_length, "just checking");
        }
        final_young_length = min_young_length;
      }
    }
    // and we're done!

    // we should have at least one region in the target young length
    _young_list_target_length =
                        final_young_length + _recorded_survivor_regions;

    // let's keep an eye on how long we spend on this calculation
    // right now, I assume that we'll print it when we need it; we
    // should really add it to the breakdown of a pause
    double end_time_sec = os::elapsedTime();
    double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;

#ifdef TRACE_CALC_YOUNG_LENGTH
    // leave this in for debugging, just in case
    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
                           "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT " " SIZE_FORMAT,
                           target_pause_time_ms,
                           _young_list_target_length,
                           elapsed_time_ms,
                           full_young_gcs() ? "full" : "partial",
                           during_initial_mark_pause() ? " i-m" : "",
                           _in_marking_window,
                           _in_marking_window_im);
#endif // TRACE_CALC_YOUNG_LENGTH

    if (_young_list_target_length < _young_list_min_length) {
      // bummer; this means that, if we do a pause when the maximal
      // length dictates, we'll violate the pause spacing target (the
      // min length was calculated based on the application's current
      // alloc rate);

      // so, we have to bite the bullet, and allocate the minimum
      // number. We'll violate our target, but we just can't meet it.

#ifdef TRACE_CALC_YOUNG_LENGTH
      // leave this in for debugging, just in case
      gclog_or_tty->print_cr("adjusted target length from "
                             SIZE_FORMAT " to " SIZE_FORMAT,
                             _young_list_target_length, _young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH

      _young_list_target_length = _young_list_min_length;
    }
  } else {
    // we are in a partially-young mode or we've run out of regions (due
    // to evacuation failure)

#ifdef TRACE_CALC_YOUNG_LENGTH
    // leave this in for debugging, just in case
    gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT,
                           _young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH
    // we'll do the pause as soon as possible by choosing the minimum
    _young_list_target_length = _young_list_min_length;
  }

  _rs_lengths_prediction = rs_lengths;
}

// This is used by: calculate_young_list_target_length(rs_length). It
// returns true iff:
// the predicted pause time for the given young list will not overflow
// the target pause time
// and:
// the predicted amount of surviving data will not overflow the
// amount of free space available for survivor regions.
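// Illustrative example (hypothetical numbers, purely for orientation): with
// a 200 ms target and a base_time_ms of 60 ms, a candidate young length fits
// only if the predicted copy time plus the per-region "young other" time
// stays under the remaining 140 ms, and (2.0 + sigma()) times the predicted
// bytes to copy still fits into the regions left over once the young regions
// are subtracted from init_free_regions.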
690 // 691 bool 692 G1CollectorPolicy::predict_will_fit(size_t young_length, 693 double base_time_ms, 694 size_t init_free_regions, 695 double target_pause_time_ms) { 696 697 if (young_length >= init_free_regions) 698 // end condition 1: not enough space for the young regions 699 return false; 700 701 double accum_surv_rate_adj = 0.0; 702 double accum_surv_rate = 703 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj; 704 705 size_t bytes_to_copy = 706 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); 707 708 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); 709 710 double young_other_time_ms = 711 predict_young_other_time_ms(young_length); 712 713 double pause_time_ms = 714 base_time_ms + copy_time_ms + young_other_time_ms; 715 716 if (pause_time_ms > target_pause_time_ms) 717 // end condition 2: over the target pause time 718 return false; 719 720 size_t free_bytes = 721 (init_free_regions - young_length) * HeapRegion::GrainBytes; 722 723 if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes) 724 // end condition 3: out of to-space (conservatively) 725 return false; 726 727 // success! 728 return true; 729 } 730 731 double G1CollectorPolicy::predict_survivor_regions_evac_time() { 732 double survivor_regions_evac_time = 0.0; 733 for (HeapRegion * r = _recorded_survivor_head; 734 r != NULL && r != _recorded_survivor_tail->get_next_young_region(); 735 r = r->get_next_young_region()) { 736 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true); 737 } 738 return survivor_regions_evac_time; 739 } 740 741 void G1CollectorPolicy::check_prediction_validity() { 742 guarantee( adaptive_young_list_length(), "should not call this otherwise" ); 743 744 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths(); 745 if (rs_lengths > _rs_lengths_prediction) { 746 // add 10% to avoid having to recalculate often 747 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; 748 calculate_young_list_target_length(rs_lengths_prediction); 749 } 750 } 751 752 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size, 753 bool is_tlab, 754 bool* gc_overhead_limit_was_exceeded) { 755 guarantee(false, "Not using this policy feature yet."); 756 return NULL; 757 } 758 759 // This method controls how a collector handles one or more 760 // of its generations being fully allocated. 
761 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size, 762 bool is_tlab) { 763 guarantee(false, "Not using this policy feature yet."); 764 return NULL; 765 } 766 767 768 #ifndef PRODUCT 769 bool G1CollectorPolicy::verify_young_ages() { 770 HeapRegion* head = _g1->young_list()->first_region(); 771 return 772 verify_young_ages(head, _short_lived_surv_rate_group); 773 // also call verify_young_ages on any additional surv rate groups 774 } 775 776 bool 777 G1CollectorPolicy::verify_young_ages(HeapRegion* head, 778 SurvRateGroup *surv_rate_group) { 779 guarantee( surv_rate_group != NULL, "pre-condition" ); 780 781 const char* name = surv_rate_group->name(); 782 bool ret = true; 783 int prev_age = -1; 784 785 for (HeapRegion* curr = head; 786 curr != NULL; 787 curr = curr->get_next_young_region()) { 788 SurvRateGroup* group = curr->surv_rate_group(); 789 if (group == NULL && !curr->is_survivor()) { 790 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name); 791 ret = false; 792 } 793 794 if (surv_rate_group == group) { 795 int age = curr->age_in_surv_rate_group(); 796 797 if (age < 0) { 798 gclog_or_tty->print_cr("## %s: encountered negative age", name); 799 ret = false; 800 } 801 802 if (age <= prev_age) { 803 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing " 804 "(%d, %d)", name, age, prev_age); 805 ret = false; 806 } 807 prev_age = age; 808 } 809 } 810 811 return ret; 812 } 813 #endif // PRODUCT 814 815 void G1CollectorPolicy::record_full_collection_start() { 816 _cur_collection_start_sec = os::elapsedTime(); 817 // Release the future to-space so that it is available for compaction into. 818 _g1->set_full_collection(); 819 } 820 821 void G1CollectorPolicy::record_full_collection_end() { 822 // Consider this like a collection pause for the purposes of allocation 823 // since last pause. 824 double end_sec = os::elapsedTime(); 825 double full_gc_time_sec = end_sec - _cur_collection_start_sec; 826 double full_gc_time_ms = full_gc_time_sec * 1000.0; 827 828 _all_full_gc_times_ms->add(full_gc_time_ms); 829 830 update_recent_gc_times(end_sec, full_gc_time_ms); 831 832 _g1->clear_full_collection(); 833 834 // "Nuke" the heuristics that control the fully/partially young GC 835 // transitions and make sure we start with fully young GCs after the 836 // Full GC. 837 set_full_young_gcs(true); 838 _last_full_young_gc = false; 839 _should_revert_to_full_young_gcs = false; 840 clear_initiate_conc_mark_if_possible(); 841 clear_during_initial_mark_pause(); 842 _known_garbage_bytes = 0; 843 _known_garbage_ratio = 0.0; 844 _in_marking_window = false; 845 _in_marking_window_im = false; 846 847 _short_lived_surv_rate_group->start_adding_regions(); 848 // also call this on any additional surv rate groups 849 850 record_survivor_regions(0, NULL, NULL); 851 852 _prev_region_num_young = _region_num_young; 853 _prev_region_num_tenured = _region_num_tenured; 854 855 _free_regions_at_end_of_collection = _g1->free_regions(); 856 // Reset survivors SurvRateGroup. 
  _survivor_surv_rate_group->reset();
  calculate_young_list_min_length();
  calculate_young_list_target_length();
}

void G1CollectorPolicy::record_before_bytes(size_t bytes) {
  _bytes_in_to_space_before_gc += bytes;
}

void G1CollectorPolicy::record_after_bytes(size_t bytes) {
  _bytes_in_to_space_after_gc += bytes;
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    if (in_young_gc_mode())
      gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_to_space_before_gc = 0;
  _bytes_in_to_space_after_gc = 0;
  _bytes_in_collection_set_before_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  if (in_young_gc_mode())
    _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_start() {
  _mark_init_start_sec = os::elapsedTime();
  guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
}

void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
clear_during_initial_mark_pause(); 957 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms; 958 } 959 960 void G1CollectorPolicy::record_concurrent_mark_init_end() { 961 double end_time_sec = os::elapsedTime(); 962 double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0; 963 _concurrent_mark_init_times_ms->add(elapsed_time_ms); 964 record_concurrent_mark_init_end_pre(elapsed_time_ms); 965 966 _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true); 967 } 968 969 void G1CollectorPolicy::record_concurrent_mark_remark_start() { 970 _mark_remark_start_sec = os::elapsedTime(); 971 _during_marking = false; 972 } 973 974 void G1CollectorPolicy::record_concurrent_mark_remark_end() { 975 double end_time_sec = os::elapsedTime(); 976 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 977 _concurrent_mark_remark_times_ms->add(elapsed_time_ms); 978 _cur_mark_stop_world_time_ms += elapsed_time_ms; 979 _prev_collection_pause_end_ms += elapsed_time_ms; 980 981 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true); 982 } 983 984 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { 985 _mark_cleanup_start_sec = os::elapsedTime(); 986 } 987 988 void 989 G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes, 990 size_t max_live_bytes) { 991 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes); 992 record_concurrent_mark_cleanup_end_work2(); 993 } 994 995 void 996 G1CollectorPolicy:: 997 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes, 998 size_t max_live_bytes) { 999 if (_n_marks < 2) _n_marks++; 1000 if (G1PolicyVerbose > 0) 1001 gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB " 1002 " (of " SIZE_FORMAT " MB heap).", 1003 max_live_bytes/M, _g1->capacity()/M); 1004 } 1005 1006 // The important thing about this is that it includes "os::elapsedTime". 1007 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() { 1008 double end_time_sec = os::elapsedTime(); 1009 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0; 1010 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1011 _cur_mark_stop_world_time_ms += elapsed_time_ms; 1012 _prev_collection_pause_end_ms += elapsed_time_ms; 1013 1014 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true); 1015 1016 _num_markings++; 1017 1018 // We did a marking, so reset the "since_last_mark" variables. 1019 double considerConcMarkCost = 1.0; 1020 // If there are available processors, concurrent activity is free... 
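// ... that is, if twice the number of non-daemon threads still fits within
// the active processor count, the cost factor below is set to 0.0 (the
// concurrent marking work is treated as free); otherwise it is left at 1.0.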
1021 if (Threads::number_of_non_daemon_threads() * 2 < 1022 os::active_processor_count()) { 1023 considerConcMarkCost = 0.0; 1024 } 1025 _n_pauses_at_mark_end = _n_pauses; 1026 _n_marks_since_last_pause++; 1027 } 1028 1029 void 1030 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { 1031 if (in_young_gc_mode()) { 1032 _should_revert_to_full_young_gcs = false; 1033 _last_full_young_gc = true; 1034 _in_marking_window = false; 1035 if (adaptive_young_list_length()) 1036 calculate_young_list_target_length(); 1037 } 1038 } 1039 1040 void G1CollectorPolicy::record_concurrent_pause() { 1041 if (_stop_world_start > 0.0) { 1042 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0; 1043 _all_yield_times_ms->add(yield_ms); 1044 } 1045 } 1046 1047 void G1CollectorPolicy::record_concurrent_pause_end() { 1048 } 1049 1050 template<class T> 1051 T sum_of(T* sum_arr, int start, int n, int N) { 1052 T sum = (T)0; 1053 for (int i = 0; i < n; i++) { 1054 int j = (start + i) % N; 1055 sum += sum_arr[j]; 1056 } 1057 return sum; 1058 } 1059 1060 void G1CollectorPolicy::print_par_stats(int level, 1061 const char* str, 1062 double* data) { 1063 double min = data[0], max = data[0]; 1064 double total = 0.0; 1065 LineBuffer buf(level); 1066 buf.append("[%s (ms):", str); 1067 for (uint i = 0; i < ParallelGCThreads; ++i) { 1068 double val = data[i]; 1069 if (val < min) 1070 min = val; 1071 if (val > max) 1072 max = val; 1073 total += val; 1074 buf.append(" %3.1lf", val); 1075 } 1076 buf.append_and_print_cr(""); 1077 double avg = total / (double) ParallelGCThreads; 1078 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]", 1079 avg, min, max, max - min); 1080 } 1081 1082 void G1CollectorPolicy::print_par_sizes(int level, 1083 const char* str, 1084 double* data) { 1085 double min = data[0], max = data[0]; 1086 double total = 0.0; 1087 LineBuffer buf(level); 1088 buf.append("[%s :", str); 1089 for (uint i = 0; i < ParallelGCThreads; ++i) { 1090 double val = data[i]; 1091 if (val < min) 1092 min = val; 1093 if (val > max) 1094 max = val; 1095 total += val; 1096 buf.append(" %d", (int) val); 1097 } 1098 buf.append_and_print_cr(""); 1099 double avg = total / (double) ParallelGCThreads; 1100 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]", 1101 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min); 1102 } 1103 1104 void G1CollectorPolicy::print_stats (int level, 1105 const char* str, 1106 double value) { 1107 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value); 1108 } 1109 1110 void G1CollectorPolicy::print_stats (int level, 1111 const char* str, 1112 int value) { 1113 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value); 1114 } 1115 1116 double G1CollectorPolicy::avg_value (double* data) { 1117 if (G1CollectedHeap::use_parallel_gc_threads()) { 1118 double ret = 0.0; 1119 for (uint i = 0; i < ParallelGCThreads; ++i) 1120 ret += data[i]; 1121 return ret / (double) ParallelGCThreads; 1122 } else { 1123 return data[0]; 1124 } 1125 } 1126 1127 double G1CollectorPolicy::max_value (double* data) { 1128 if (G1CollectedHeap::use_parallel_gc_threads()) { 1129 double ret = data[0]; 1130 for (uint i = 1; i < ParallelGCThreads; ++i) 1131 if (data[i] > ret) 1132 ret = data[i]; 1133 return ret; 1134 } else { 1135 return data[0]; 1136 } 1137 } 1138 1139 double G1CollectorPolicy::sum_of_values (double* data) { 1140 if (G1CollectedHeap::use_parallel_gc_threads()) { 1141 double sum = 0.0; 1142 for (uint i = 0; i < 
ParallelGCThreads; i++) 1143 sum += data[i]; 1144 return sum; 1145 } else { 1146 return data[0]; 1147 } 1148 } 1149 1150 double G1CollectorPolicy::max_sum (double* data1, 1151 double* data2) { 1152 double ret = data1[0] + data2[0]; 1153 1154 if (G1CollectedHeap::use_parallel_gc_threads()) { 1155 for (uint i = 1; i < ParallelGCThreads; ++i) { 1156 double data = data1[i] + data2[i]; 1157 if (data > ret) 1158 ret = data; 1159 } 1160 } 1161 return ret; 1162 } 1163 1164 // Anything below that is considered to be zero 1165 #define MIN_TIMER_GRANULARITY 0.0000001 1166 1167 void G1CollectorPolicy::record_collection_pause_end() { 1168 double end_time_sec = os::elapsedTime(); 1169 double elapsed_ms = _last_pause_time_ms; 1170 bool parallel = G1CollectedHeap::use_parallel_gc_threads(); 1171 size_t rs_size = 1172 _cur_collection_pause_used_regions_at_start - collection_set_size(); 1173 size_t cur_used_bytes = _g1->used(); 1174 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 1175 bool last_pause_included_initial_mark = false; 1176 bool update_stats = !_g1->evacuation_failed(); 1177 1178 #ifndef PRODUCT 1179 if (G1YoungSurvRateVerbose) { 1180 gclog_or_tty->print_cr(""); 1181 _short_lived_surv_rate_group->print(); 1182 // do that for any other surv rate groups too 1183 } 1184 #endif // PRODUCT 1185 1186 if (in_young_gc_mode()) { 1187 last_pause_included_initial_mark = during_initial_mark_pause(); 1188 if (last_pause_included_initial_mark) 1189 record_concurrent_mark_init_end_pre(0.0); 1190 1191 size_t min_used_targ = 1192 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent; 1193 1194 1195 if (!_g1->mark_in_progress() && !_last_full_young_gc) { 1196 assert(!last_pause_included_initial_mark, "invariant"); 1197 if (cur_used_bytes > min_used_targ && 1198 cur_used_bytes > _prev_collection_pause_used_at_end_bytes) { 1199 assert(!during_initial_mark_pause(), "we should not see this here"); 1200 1201 // Note: this might have already been set, if during the last 1202 // pause we decided to start a cycle but at the beginning of 1203 // this pause we decided to postpone it. That's OK. 1204 set_initiate_conc_mark_if_possible(); 1205 } 1206 } 1207 1208 _prev_collection_pause_used_at_end_bytes = cur_used_bytes; 1209 } 1210 1211 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0, 1212 end_time_sec, false); 1213 1214 guarantee(_cur_collection_pause_used_regions_at_start >= 1215 collection_set_size(), 1216 "Negative RS size?"); 1217 1218 // This assert is exempted when we're doing parallel collection pauses, 1219 // because the fragmentation caused by the parallel GC allocation buffers 1220 // can lead to more memory being used during collection than was used 1221 // before. Best leave this out until the fragmentation problem is fixed. 1222 // Pauses in which evacuation failed can also lead to negative 1223 // collections, since no space is reclaimed from a region containing an 1224 // object whose evacuation failed. 1225 // Further, we're now always doing parallel collection. But I'm still 1226 // leaving this here as a placeholder for a more precise assertion later. 1227 // (DLD, 10/05.) 1228 assert((true || parallel) // Always using GC LABs now. 
1229 || _g1->evacuation_failed() 1230 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes, 1231 "Negative collection"); 1232 1233 size_t freed_bytes = 1234 _cur_collection_pause_used_at_start_bytes - cur_used_bytes; 1235 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes; 1236 1237 double survival_fraction = 1238 (double)surviving_bytes/ 1239 (double)_collection_set_bytes_used_before; 1240 1241 _n_pauses++; 1242 1243 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms); 1244 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms); 1245 double update_rs_time = avg_value(_par_last_update_rs_times_ms); 1246 double update_rs_processed_buffers = 1247 sum_of_values(_par_last_update_rs_processed_buffers); 1248 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms); 1249 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms); 1250 double termination_time = avg_value(_par_last_termination_times_ms); 1251 1252 double parallel_known_time = update_rs_time + 1253 ext_root_scan_time + 1254 mark_stack_scan_time + 1255 scan_rs_time + 1256 obj_copy_time + 1257 termination_time; 1258 1259 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time; 1260 1261 PauseSummary* summary = _summary; 1262 1263 if (update_stats) { 1264 _recent_rs_scan_times_ms->add(scan_rs_time); 1265 _recent_pause_times_ms->add(elapsed_ms); 1266 _recent_rs_sizes->add(rs_size); 1267 1268 MainBodySummary* body_summary = summary->main_body_summary(); 1269 guarantee(body_summary != NULL, "should not be null!"); 1270 1271 if (_satb_drain_time_set) 1272 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms); 1273 else 1274 body_summary->record_satb_drain_time_ms(0.0); 1275 1276 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time); 1277 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time); 1278 body_summary->record_update_rs_time_ms(update_rs_time); 1279 body_summary->record_scan_rs_time_ms(scan_rs_time); 1280 body_summary->record_obj_copy_time_ms(obj_copy_time); 1281 if (parallel) { 1282 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms); 1283 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms); 1284 body_summary->record_termination_time_ms(termination_time); 1285 body_summary->record_parallel_other_time_ms(parallel_other_time); 1286 } 1287 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms); 1288 1289 // We exempt parallel collection from this check because Alloc Buffer 1290 // fragmentation can produce negative collections. Same with evac 1291 // failure. 1292 // Further, we're now always doing parallel collection. But I'm still 1293 // leaving this here as a placeholder for a more precise assertion later. 1294 // (DLD, 10/05. 1295 assert((true || parallel) 1296 || _g1->evacuation_failed() 1297 || surviving_bytes <= _collection_set_bytes_used_before, 1298 "Or else negative collection!"); 1299 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before); 1300 _recent_CS_bytes_surviving->add(surviving_bytes); 1301 1302 // this is where we update the allocation rate of the application 1303 double app_time_ms = 1304 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms); 1305 if (app_time_ms < MIN_TIMER_GRANULARITY) { 1306 // This usually happens due to the timer not having the required 1307 // granularity. Some Linuxes are the usual culprits. 1308 // We'll just set it to something (arbitrarily) small. 
1309 app_time_ms = 1.0; 1310 } 1311 size_t regions_allocated = 1312 (_region_num_young - _prev_region_num_young) + 1313 (_region_num_tenured - _prev_region_num_tenured); 1314 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 1315 _alloc_rate_ms_seq->add(alloc_rate_ms); 1316 _prev_region_num_young = _region_num_young; 1317 _prev_region_num_tenured = _region_num_tenured; 1318 1319 double interval_ms = 1320 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 1321 update_recent_gc_times(end_time_sec, elapsed_ms); 1322 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 1323 if (recent_avg_pause_time_ratio() < 0.0 || 1324 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 1325 #ifndef PRODUCT 1326 // Dump info to allow post-facto debugging 1327 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds"); 1328 gclog_or_tty->print_cr("-------------------------------------------"); 1329 gclog_or_tty->print_cr("Recent GC Times (ms):"); 1330 _recent_gc_times_ms->dump(); 1331 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec); 1332 _recent_prev_end_times_for_all_gcs_sec->dump(); 1333 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f", 1334 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio()); 1335 // In debug mode, terminate the JVM if the user wants to debug at this point. 1336 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above"); 1337 #endif // !PRODUCT 1338 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 1339 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 1340 if (_recent_avg_pause_time_ratio < 0.0) { 1341 _recent_avg_pause_time_ratio = 0.0; 1342 } else { 1343 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 1344 _recent_avg_pause_time_ratio = 1.0; 1345 } 1346 } 1347 } 1348 1349 if (G1PolicyVerbose > 1) { 1350 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses); 1351 } 1352 1353 if (G1PolicyVerbose > 1) { 1354 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n" 1355 " ET-RS: %10.6f ms (avg: %10.6f ms)\n" 1356 " |RS|: " SIZE_FORMAT, 1357 elapsed_ms, recent_avg_time_for_pauses_ms(), 1358 scan_rs_time, recent_avg_time_for_rs_scan_ms(), 1359 rs_size); 1360 1361 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K" 1362 " At end " SIZE_FORMAT "K\n" 1363 " garbage : " SIZE_FORMAT "K" 1364 " of " SIZE_FORMAT "K\n" 1365 " survival : %6.2f%% (%6.2f%% avg)", 1366 _cur_collection_pause_used_at_start_bytes/K, 1367 _g1->used()/K, freed_bytes/K, 1368 _collection_set_bytes_used_before/K, 1369 survival_fraction*100.0, 1370 recent_avg_survival_fraction()*100.0); 1371 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f", 1372 recent_avg_pause_time_ratio() * 100.0); 1373 } 1374 1375 double other_time_ms = elapsed_ms; 1376 1377 if (_satb_drain_time_set) { 1378 other_time_ms -= _cur_satb_drain_time_ms; 1379 } 1380 1381 if (parallel) { 1382 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms; 1383 } else { 1384 other_time_ms -= 1385 update_rs_time + 1386 ext_root_scan_time + mark_stack_scan_time + 1387 scan_rs_time + obj_copy_time; 1388 } 1389 1390 if (PrintGCDetails) { 1391 gclog_or_tty->print_cr("%s, %1.8lf secs]", 1392 (last_pause_included_initial_mark) ? 
" (initial-mark)" : "", 1393 elapsed_ms / 1000.0); 1394 1395 if (_satb_drain_time_set) { 1396 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms); 1397 } 1398 if (_last_satb_drain_processed_buffers >= 0) { 1399 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers); 1400 } 1401 if (parallel) { 1402 print_stats(1, "Parallel Time", _cur_collection_par_time_ms); 1403 print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms); 1404 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); 1405 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers); 1406 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); 1407 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); 1408 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); 1409 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); 1410 print_par_stats(2, "Termination", _par_last_termination_times_ms); 1411 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts); 1412 print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms); 1413 1414 for (int i = 0; i < _parallel_gc_threads; i++) { 1415 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i]; 1416 } 1417 print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms); 1418 1419 print_stats(2, "Parallel Other", parallel_other_time); 1420 print_stats(1, "Clear CT", _cur_clear_ct_time_ms); 1421 } else { 1422 print_stats(1, "Update RS", update_rs_time); 1423 print_stats(2, "Processed Buffers", 1424 (int)update_rs_processed_buffers); 1425 print_stats(1, "Ext Root Scanning", ext_root_scan_time); 1426 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time); 1427 print_stats(1, "Scan RS", scan_rs_time); 1428 print_stats(1, "Object Copying", obj_copy_time); 1429 } 1430 #ifndef PRODUCT 1431 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms); 1432 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms); 1433 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms); 1434 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms); 1435 if (_num_cc_clears > 0) { 1436 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears)); 1437 } 1438 #endif 1439 print_stats(1, "Other", other_time_ms); 1440 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms); 1441 1442 for (int i = 0; i < _aux_num; ++i) { 1443 if (_cur_aux_times_set[i]) { 1444 char buffer[96]; 1445 sprintf(buffer, "Aux%d", i); 1446 print_stats(1, buffer, _cur_aux_times_ms[i]); 1447 } 1448 } 1449 } 1450 1451 _all_pause_times_ms->add(elapsed_ms); 1452 if (update_stats) { 1453 summary->record_total_time_ms(elapsed_ms); 1454 summary->record_other_time_ms(other_time_ms); 1455 } 1456 for (int i = 0; i < _aux_num; ++i) 1457 if (_cur_aux_times_set[i]) 1458 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]); 1459 1460 // Reset marks-between-pauses counter. 1461 _n_marks_since_last_pause = 0; 1462 1463 // Update the efficiency-since-mark vars. 1464 double proc_ms = elapsed_ms * (double) _parallel_gc_threads; 1465 if (elapsed_ms < MIN_TIMER_GRANULARITY) { 1466 // This usually happens due to the timer not having the required 1467 // granularity. Some Linuxes are the usual culprits. 1468 // We'll just set it to something (arbitrarily) small. 
1469 proc_ms = 1.0; 1470 } 1471 double cur_efficiency = (double) freed_bytes / proc_ms; 1472 1473 bool new_in_marking_window = _in_marking_window; 1474 bool new_in_marking_window_im = false; 1475 if (during_initial_mark_pause()) { 1476 new_in_marking_window = true; 1477 new_in_marking_window_im = true; 1478 } 1479 1480 if (in_young_gc_mode()) { 1481 if (_last_full_young_gc) { 1482 set_full_young_gcs(false); 1483 _last_full_young_gc = false; 1484 } 1485 1486 if ( !_last_young_gc_full ) { 1487 if ( _should_revert_to_full_young_gcs || 1488 _known_garbage_ratio < 0.05 || 1489 (adaptive_young_list_length() && 1490 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) { 1491 set_full_young_gcs(true); 1492 } 1493 } 1494 _should_revert_to_full_young_gcs = false; 1495 1496 if (_last_young_gc_full && !_during_marking) 1497 _young_gc_eff_seq->add(cur_efficiency); 1498 } 1499 1500 _short_lived_surv_rate_group->start_adding_regions(); 1501 // do that for any other surv rate groupsx 1502 1503 // <NEW PREDICTION> 1504 1505 if (update_stats) { 1506 double pause_time_ms = elapsed_ms; 1507 1508 size_t diff = 0; 1509 if (_max_pending_cards >= _pending_cards) 1510 diff = _max_pending_cards - _pending_cards; 1511 _pending_card_diff_seq->add((double) diff); 1512 1513 double cost_per_card_ms = 0.0; 1514 if (_pending_cards > 0) { 1515 cost_per_card_ms = update_rs_time / (double) _pending_cards; 1516 _cost_per_card_ms_seq->add(cost_per_card_ms); 1517 } 1518 1519 size_t cards_scanned = _g1->cards_scanned(); 1520 1521 double cost_per_entry_ms = 0.0; 1522 if (cards_scanned > 10) { 1523 cost_per_entry_ms = scan_rs_time / (double) cards_scanned; 1524 if (_last_young_gc_full) 1525 _cost_per_entry_ms_seq->add(cost_per_entry_ms); 1526 else 1527 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms); 1528 } 1529 1530 if (_max_rs_lengths > 0) { 1531 double cards_per_entry_ratio = 1532 (double) cards_scanned / (double) _max_rs_lengths; 1533 if (_last_young_gc_full) 1534 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1535 else 1536 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1537 } 1538 1539 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths; 1540 if (rs_length_diff >= 0) 1541 _rs_length_diff_seq->add((double) rs_length_diff); 1542 1543 size_t copied_bytes = surviving_bytes; 1544 double cost_per_byte_ms = 0.0; 1545 if (copied_bytes > 0) { 1546 cost_per_byte_ms = obj_copy_time / (double) copied_bytes; 1547 if (_in_marking_window) 1548 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); 1549 else 1550 _cost_per_byte_ms_seq->add(cost_per_byte_ms); 1551 } 1552 1553 double all_other_time_ms = pause_time_ms - 1554 (update_rs_time + scan_rs_time + obj_copy_time + 1555 _mark_closure_time_ms + termination_time); 1556 1557 double young_other_time_ms = 0.0; 1558 if (_recorded_young_regions > 0) { 1559 young_other_time_ms = 1560 _recorded_young_cset_choice_time_ms + 1561 _recorded_young_free_cset_time_ms; 1562 _young_other_cost_per_region_ms_seq->add(young_other_time_ms / 1563 (double) _recorded_young_regions); 1564 } 1565 double non_young_other_time_ms = 0.0; 1566 if (_recorded_non_young_regions > 0) { 1567 non_young_other_time_ms = 1568 _recorded_non_young_cset_choice_time_ms + 1569 _recorded_non_young_free_cset_time_ms; 1570 1571 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms / 1572 (double) _recorded_non_young_regions); 1573 } 1574 1575 double constant_other_time_ms = all_other_time_ms - 1576 (young_other_time_ms + 
non_young_other_time_ms); 1577 _constant_other_time_ms_seq->add(constant_other_time_ms); 1578 1579 double survival_ratio = 0.0; 1580 if (_bytes_in_collection_set_before_gc > 0) { 1581 survival_ratio = (double) bytes_in_to_space_during_gc() / 1582 (double) _bytes_in_collection_set_before_gc; 1583 } 1584 1585 _pending_cards_seq->add((double) _pending_cards); 1586 _scanned_cards_seq->add((double) cards_scanned); 1587 _rs_lengths_seq->add((double) _max_rs_lengths); 1588 1589 double expensive_region_limit_ms = 1590 (double) MaxGCPauseMillis - predict_constant_other_time_ms(); 1591 if (expensive_region_limit_ms < 0.0) { 1592 // this means that the other time was predicted to be longer than 1593 // the max pause time 1594 expensive_region_limit_ms = (double) MaxGCPauseMillis; 1595 } 1596 _expensive_region_limit_ms = expensive_region_limit_ms; 1597 1598 if (PREDICTIONS_VERBOSE) { 1599 gclog_or_tty->print_cr(""); 1600 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d " 1601 "REGIONS %d %d %d " 1602 "PENDING_CARDS %d %d " 1603 "CARDS_SCANNED %d %d " 1604 "RS_LENGTHS %d %d " 1605 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf " 1606 "SURVIVAL_RATIO %1.6lf %1.6lf " 1607 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf " 1608 "OTHER_YOUNG %1.6lf %1.6lf " 1609 "OTHER_NON_YOUNG %1.6lf %1.6lf " 1610 "VTIME_DIFF %1.6lf TERMINATION %1.6lf " 1611 "ELAPSED %1.6lf %1.6lf ", 1612 _cur_collection_start_sec, 1613 (!_last_young_gc_full) ? 2 : 1614 (last_pause_included_initial_mark) ? 1 : 0, 1615 _recorded_region_num, 1616 _recorded_young_regions, 1617 _recorded_non_young_regions, 1618 _predicted_pending_cards, _pending_cards, 1619 _predicted_cards_scanned, cards_scanned, 1620 _predicted_rs_lengths, _max_rs_lengths, 1621 _predicted_rs_update_time_ms, update_rs_time, 1622 _predicted_rs_scan_time_ms, scan_rs_time, 1623 _predicted_survival_ratio, survival_ratio, 1624 _predicted_object_copy_time_ms, obj_copy_time, 1625 _predicted_constant_other_time_ms, constant_other_time_ms, 1626 _predicted_young_other_time_ms, young_other_time_ms, 1627 _predicted_non_young_other_time_ms, 1628 non_young_other_time_ms, 1629 _vtime_diff_ms, termination_time, 1630 _predicted_pause_time_ms, elapsed_ms); 1631 } 1632 1633 if (G1PolicyVerbose > 0) { 1634 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms", 1635 _predicted_pause_time_ms, 1636 (_within_target) ? "within" : "outside", 1637 elapsed_ms); 1638 } 1639 1640 } 1641 1642 _in_marking_window = new_in_marking_window; 1643 _in_marking_window_im = new_in_marking_window_im; 1644 _free_regions_at_end_of_collection = _g1->free_regions(); 1645 calculate_young_list_min_length(); 1646 calculate_young_list_target_length(); 1647 1648 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
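// (Illustrative arithmetic for the goal computed on the next line: with, say, a 0.2 s max GC time from the MMU tracker and G1RSetUpdatingPauseTimePercent at 10, update_rs_time_goal_ms would be 0.2 * 1000 * 10 / 100 = 20 ms.)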
1649 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 1650 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms); 1651 // </NEW PREDICTION> 1652 } 1653 1654 #define EXT_SIZE_FORMAT "%d%s" 1655 #define EXT_SIZE_PARAMS(bytes) \ 1656 byte_size_in_proper_unit((bytes)), \ 1657 proper_unit_for_byte_size((bytes)) 1658 1659 void G1CollectorPolicy::print_heap_transition() { 1660 if (PrintGCDetails) { 1661 YoungList* young_list = _g1->young_list(); 1662 size_t eden_bytes = young_list->eden_used_bytes(); 1663 size_t survivor_bytes = young_list->survivor_used_bytes(); 1664 size_t used_before_gc = _cur_collection_pause_used_at_start_bytes; 1665 size_t used = _g1->used(); 1666 size_t capacity = _g1->capacity(); 1667 1668 gclog_or_tty->print_cr( 1669 " [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " 1670 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " 1671 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->" 1672 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]", 1673 EXT_SIZE_PARAMS(_eden_bytes_before_gc), 1674 EXT_SIZE_PARAMS(eden_bytes), 1675 EXT_SIZE_PARAMS(_survivor_bytes_before_gc), 1676 EXT_SIZE_PARAMS(survivor_bytes), 1677 EXT_SIZE_PARAMS(used_before_gc), 1678 EXT_SIZE_PARAMS(_capacity_before_gc), 1679 EXT_SIZE_PARAMS(used), 1680 EXT_SIZE_PARAMS(capacity)); 1681 } else if (PrintGC) { 1682 _g1->print_size_transition(gclog_or_tty, 1683 _cur_collection_pause_used_at_start_bytes, 1684 _g1->used(), _g1->capacity()); 1685 } 1686 } 1687 1688 // <NEW PREDICTION> 1689 1690 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1691 double update_rs_processed_buffers, 1692 double goal_ms) { 1693 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1694 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1695 1696 if (G1UseAdaptiveConcRefinement) { 1697 const int k_gy = 3, k_gr = 6; 1698 const double inc_k = 1.1, dec_k = 0.9; 1699 1700 int g = cg1r->green_zone(); 1701 if (update_rs_time > goal_ms) { 1702 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 
1703 } else { 1704 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { 1705 g = (int)MAX2(g * inc_k, g + 1.0); 1706 } 1707 } 1708 // Change the refinement threads params 1709 cg1r->set_green_zone(g); 1710 cg1r->set_yellow_zone(g * k_gy); 1711 cg1r->set_red_zone(g * k_gr); 1712 cg1r->reinitialize_threads(); 1713 1714 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1); 1715 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, 1716 cg1r->yellow_zone()); 1717 // Change the barrier params 1718 dcqs.set_process_completed_threshold(processing_threshold); 1719 dcqs.set_max_completed_queue(cg1r->red_zone()); 1720 } 1721 1722 int curr_queue_size = dcqs.completed_buffers_num(); 1723 if (curr_queue_size >= cg1r->yellow_zone()) { 1724 dcqs.set_completed_queue_padding(curr_queue_size); 1725 } else { 1726 dcqs.set_completed_queue_padding(0); 1727 } 1728 dcqs.notify_if_necessary(); 1729 } 1730 1731 double 1732 G1CollectorPolicy:: 1733 predict_young_collection_elapsed_time_ms(size_t adjustment) { 1734 guarantee( adjustment == 0 || adjustment == 1, "invariant" ); 1735 1736 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1737 size_t young_num = g1h->young_list()->length(); 1738 if (young_num == 0) 1739 return 0.0; 1740 1741 young_num += adjustment; 1742 size_t pending_cards = predict_pending_cards(); 1743 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() + 1744 predict_rs_length_diff(); 1745 size_t card_num; 1746 if (full_young_gcs()) 1747 card_num = predict_young_card_num(rs_lengths); 1748 else 1749 card_num = predict_non_young_card_num(rs_lengths); 1750 size_t young_byte_size = young_num * HeapRegion::GrainBytes; 1751 double accum_yg_surv_rate = 1752 _short_lived_surv_rate_group->accum_surv_rate(adjustment); 1753 1754 size_t bytes_to_copy = 1755 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes); 1756 1757 return 1758 predict_rs_update_time_ms(pending_cards) + 1759 predict_rs_scan_time_ms(card_num) + 1760 predict_object_copy_time_ms(bytes_to_copy) + 1761 predict_young_other_time_ms(young_num) + 1762 predict_constant_other_time_ms(); 1763 } 1764 1765 double 1766 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) { 1767 size_t rs_length = predict_rs_length_diff(); 1768 size_t card_num; 1769 if (full_young_gcs()) 1770 card_num = predict_young_card_num(rs_length); 1771 else 1772 card_num = predict_non_young_card_num(rs_length); 1773 return predict_base_elapsed_time_ms(pending_cards, card_num); 1774 } 1775 1776 double 1777 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1778 size_t scanned_cards) { 1779 return 1780 predict_rs_update_time_ms(pending_cards) + 1781 predict_rs_scan_time_ms(scanned_cards) + 1782 predict_constant_other_time_ms(); 1783 } 1784 1785 double 1786 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1787 bool young) { 1788 size_t rs_length = hr->rem_set()->occupied(); 1789 size_t card_num; 1790 if (full_young_gcs()) 1791 card_num = predict_young_card_num(rs_length); 1792 else 1793 card_num = predict_non_young_card_num(rs_length); 1794 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1795 1796 double region_elapsed_time_ms = 1797 predict_rs_scan_time_ms(card_num) + 1798 predict_object_copy_time_ms(bytes_to_copy); 1799 1800 if (young) 1801 region_elapsed_time_ms += predict_young_other_time_ms(1); 1802 else 1803 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1804 1805 return region_elapsed_time_ms; 1806 } 1807 1808 size_t 1809 
G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { 1810 size_t bytes_to_copy; 1811 if (hr->is_marked()) 1812 bytes_to_copy = hr->max_live_bytes(); 1813 else { 1814 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1, 1815 "invariant" ); 1816 int age = hr->age_in_surv_rate_group(); 1817 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1818 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate); 1819 } 1820 1821 return bytes_to_copy; 1822 } 1823 1824 void 1825 G1CollectorPolicy::start_recording_regions() { 1826 _recorded_rs_lengths = 0; 1827 _recorded_young_regions = 0; 1828 _recorded_non_young_regions = 0; 1829 1830 #if PREDICTIONS_VERBOSE 1831 _recorded_marked_bytes = 0; 1832 _recorded_young_bytes = 0; 1833 _predicted_bytes_to_copy = 0; 1834 _predicted_rs_lengths = 0; 1835 _predicted_cards_scanned = 0; 1836 #endif // PREDICTIONS_VERBOSE 1837 } 1838 1839 void 1840 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) { 1841 #if PREDICTIONS_VERBOSE 1842 if (!young) { 1843 _recorded_marked_bytes += hr->max_live_bytes(); 1844 } 1845 _predicted_bytes_to_copy += predict_bytes_to_copy(hr); 1846 #endif // PREDICTIONS_VERBOSE 1847 1848 size_t rs_length = hr->rem_set()->occupied(); 1849 _recorded_rs_lengths += rs_length; 1850 } 1851 1852 void 1853 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) { 1854 assert(!hr->is_young(), "should not call this"); 1855 ++_recorded_non_young_regions; 1856 record_cset_region_info(hr, false); 1857 } 1858 1859 void 1860 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) { 1861 _recorded_young_regions = n_regions; 1862 } 1863 1864 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) { 1865 #if PREDICTIONS_VERBOSE 1866 _recorded_young_bytes = bytes; 1867 #endif // PREDICTIONS_VERBOSE 1868 } 1869 1870 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) { 1871 _recorded_rs_lengths = rs_lengths; 1872 } 1873 1874 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) { 1875 _predicted_bytes_to_copy = bytes; 1876 } 1877 1878 void 1879 G1CollectorPolicy::end_recording_regions() { 1880 // The _predicted_pause_time_ms field is referenced in code 1881 // not under PREDICTIONS_VERBOSE. Let's initialize it. 
1882 _predicted_pause_time_ms = -1.0; 1883 1884 #if PREDICTIONS_VERBOSE 1885 _predicted_pending_cards = predict_pending_cards(); 1886 _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff(); 1887 if (full_young_gcs()) 1888 _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths); 1889 else 1890 _predicted_cards_scanned += 1891 predict_non_young_card_num(_predicted_rs_lengths); 1892 _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions; 1893 1894 _predicted_rs_update_time_ms = 1895 predict_rs_update_time_ms(_g1->pending_card_num()); 1896 _predicted_rs_scan_time_ms = 1897 predict_rs_scan_time_ms(_predicted_cards_scanned); 1898 _predicted_object_copy_time_ms = 1899 predict_object_copy_time_ms(_predicted_bytes_to_copy); 1900 _predicted_constant_other_time_ms = 1901 predict_constant_other_time_ms(); 1902 _predicted_young_other_time_ms = 1903 predict_young_other_time_ms(_recorded_young_regions); 1904 _predicted_non_young_other_time_ms = 1905 predict_non_young_other_time_ms(_recorded_non_young_regions); 1906 1907 _predicted_pause_time_ms = 1908 _predicted_rs_update_time_ms + 1909 _predicted_rs_scan_time_ms + 1910 _predicted_object_copy_time_ms + 1911 _predicted_constant_other_time_ms + 1912 _predicted_young_other_time_ms + 1913 _predicted_non_young_other_time_ms; 1914 #endif // PREDICTIONS_VERBOSE 1915 } 1916 1917 void G1CollectorPolicy::check_if_region_is_too_expensive(double 1918 predicted_time_ms) { 1919 // I don't think we need to do this when in young GC mode since 1920 // marking will be initiated next time we hit the soft limit anyway... 1921 if (predicted_time_ms > _expensive_region_limit_ms) { 1922 if (!in_young_gc_mode()) { 1923 set_full_young_gcs(true); 1924 // We might want to do something different here. However, 1925 // right now we don't support the non-generational G1 mode 1926 // (and in fact we are planning to remove the associated code, 1927 // see CR 6814390). 
So, let's leave it as is and this will be 1928 // removed some time in the future 1929 ShouldNotReachHere(); 1930 set_during_initial_mark_pause(); 1931 } else 1932 // no point in doing another partial one 1933 _should_revert_to_full_young_gcs = true; 1934 } 1935 } 1936 1937 // </NEW PREDICTION> 1938 1939 1940 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1941 double elapsed_ms) { 1942 _recent_gc_times_ms->add(elapsed_ms); 1943 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1944 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1945 } 1946 1947 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() { 1948 if (_recent_pause_times_ms->num() == 0) { 1949 return (double) MaxGCPauseMillis; 1950 } 1951 return _recent_pause_times_ms->avg(); 1952 } 1953 1954 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() { 1955 if (_recent_rs_scan_times_ms->num() == 0) { 1956 return (double)MaxGCPauseMillis/3.0; 1957 } 1958 return _recent_rs_scan_times_ms->avg(); 1959 } 1960 1961 int G1CollectorPolicy::number_of_recent_gcs() { 1962 assert(_recent_rs_scan_times_ms->num() == 1963 _recent_pause_times_ms->num(), "Sequence out of sync"); 1964 assert(_recent_pause_times_ms->num() == 1965 _recent_CS_bytes_used_before->num(), "Sequence out of sync"); 1966 assert(_recent_CS_bytes_used_before->num() == 1967 _recent_CS_bytes_surviving->num(), "Sequence out of sync"); 1968 1969 return _recent_pause_times_ms->num(); 1970 } 1971 1972 double G1CollectorPolicy::recent_avg_survival_fraction() { 1973 return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving, 1974 _recent_CS_bytes_used_before); 1975 } 1976 1977 double G1CollectorPolicy::last_survival_fraction() { 1978 return last_survival_fraction_work(_recent_CS_bytes_surviving, 1979 _recent_CS_bytes_used_before); 1980 } 1981 1982 double 1983 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving, 1984 TruncatedSeq* before) { 1985 assert(surviving->num() == before->num(), "Sequence out of sync"); 1986 if (before->sum() > 0.0) { 1987 double recent_survival_rate = surviving->sum() / before->sum(); 1988 // We exempt parallel collection from this check because Alloc Buffer 1989 // fragmentation can produce negative collections. 1990 // Further, we're now always doing parallel collection. But I'm still 1991 // leaving this here as a placeholder for a more precise assertion later. 1992 // (DLD, 10/05.) 1993 assert((true || G1CollectedHeap::use_parallel_gc_threads()) || 1994 _g1->evacuation_failed() || 1995 recent_survival_rate <= 1.0, "Or bad frac"); 1996 return recent_survival_rate; 1997 } else { 1998 return 1.0; // Be conservative. 1999 } 2000 } 2001 2002 double 2003 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving, 2004 TruncatedSeq* before) { 2005 assert(surviving->num() == before->num(), "Sequence out of sync"); 2006 if (surviving->num() > 0 && before->last() > 0.0) { 2007 double last_survival_rate = surviving->last() / before->last(); 2008 // We exempt parallel collection from this check because Alloc Buffer 2009 // fragmentation can produce negative collections. 2010 // Further, we're now always doing parallel collection. But I'm still 2011 // leaving this here as a placeholder for a more precise assertion later. 2012 // (DLD, 10/05.) 
2013 assert((true || G1CollectedHeap::use_parallel_gc_threads()) || 2014 last_survival_rate <= 1.0, "Or bad frac"); 2015 return last_survival_rate; 2016 } else { 2017 return 1.0; 2018 } 2019 } 2020 2021 static const int survival_min_obs = 5; 2022 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 }; 2023 static const double min_survival_rate = 0.1; 2024 2025 double 2026 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg, 2027 double latest) { 2028 double res = avg; 2029 if (number_of_recent_gcs() < survival_min_obs) { 2030 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]); 2031 } 2032 res = MAX2(res, latest); 2033 res = MAX2(res, min_survival_rate); 2034 // In the parallel case, LAB fragmentation can produce "negative 2035 // collections"; so can evac failure. Cap at 1.0 2036 res = MIN2(res, 1.0); 2037 return res; 2038 } 2039 2040 size_t G1CollectorPolicy::expansion_amount() { 2041 if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) { 2042 // We will double the existing space, or take 2043 // G1ExpandByPercentOfAvailable % of the available expansion 2044 // space, whichever is smaller, bounded below by a minimum 2045 // expansion (unless that's all that's left.) 2046 const size_t min_expand_bytes = 1*M; 2047 size_t reserved_bytes = _g1->max_capacity(); 2048 size_t committed_bytes = _g1->capacity(); 2049 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 2050 size_t expand_bytes; 2051 size_t expand_bytes_via_pct = 2052 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 2053 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 2054 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 2055 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 2056 if (G1PolicyVerbose > 1) { 2057 gclog_or_tty->print("Decided to expand: ratio = %5.2f, " 2058 "committed = %d%s, uncommitted = %d%s, via pct = %d%s.\n" 2059 " Answer = %d.\n", 2060 recent_avg_pause_time_ratio(), 2061 byte_size_in_proper_unit(committed_bytes), 2062 proper_unit_for_byte_size(committed_bytes), 2063 byte_size_in_proper_unit(uncommitted_bytes), 2064 proper_unit_for_byte_size(uncommitted_bytes), 2065 byte_size_in_proper_unit(expand_bytes_via_pct), 2066 proper_unit_for_byte_size(expand_bytes_via_pct), 2067 byte_size_in_proper_unit(expand_bytes), 2068 proper_unit_for_byte_size(expand_bytes)); 2069 } 2070 return expand_bytes; 2071 } else { 2072 return 0; 2073 } 2074 } 2075 2076 void G1CollectorPolicy::note_start_of_mark_thread() { 2077 _mark_thread_startup_sec = os::elapsedTime(); 2078 } 2079 2080 class CountCSClosure: public HeapRegionClosure { 2081 G1CollectorPolicy* _g1_policy; 2082 public: 2083 CountCSClosure(G1CollectorPolicy* g1_policy) : 2084 _g1_policy(g1_policy) {} 2085 bool doHeapRegion(HeapRegion* r) { 2086 _g1_policy->_bytes_in_collection_set_before_gc += r->used(); 2087 return false; 2088 } 2089 }; 2090 2091 void G1CollectorPolicy::count_CS_bytes_used() { 2092 CountCSClosure cs_closure(this); 2093 _g1->collection_set_iterate(&cs_closure); 2094 } 2095 2096 void G1CollectorPolicy::print_summary (int level, 2097 const char* str, 2098 NumberSeq* seq) const { 2099 double sum = seq->sum(); 2100 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)", 2101 str, sum / 1000.0, seq->avg()); 2102 } 2103 2104 void G1CollectorPolicy::print_summary_sd (int level, 2105 const char* str, 2106 NumberSeq* seq) const { 2107 print_summary(level, str, seq); 2108 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max =
%8.2lf ms)", 2109 seq->num(), seq->sd(), seq->maximum()); 2110 } 2111 2112 void G1CollectorPolicy::check_other_times(int level, 2113 NumberSeq* other_times_ms, 2114 NumberSeq* calc_other_times_ms) const { 2115 bool should_print = false; 2116 LineBuffer buf(level + 2); 2117 2118 double max_sum = MAX2(fabs(other_times_ms->sum()), 2119 fabs(calc_other_times_ms->sum())); 2120 double min_sum = MIN2(fabs(other_times_ms->sum()), 2121 fabs(calc_other_times_ms->sum())); 2122 double sum_ratio = max_sum / min_sum; 2123 if (sum_ratio > 1.1) { 2124 should_print = true; 2125 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###"); 2126 } 2127 2128 double max_avg = MAX2(fabs(other_times_ms->avg()), 2129 fabs(calc_other_times_ms->avg())); 2130 double min_avg = MIN2(fabs(other_times_ms->avg()), 2131 fabs(calc_other_times_ms->avg())); 2132 double avg_ratio = max_avg / min_avg; 2133 if (avg_ratio > 1.1) { 2134 should_print = true; 2135 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###"); 2136 } 2137 2138 if (other_times_ms->sum() < -0.01) { 2139 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###"); 2140 } 2141 2142 if (other_times_ms->avg() < -0.01) { 2143 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###"); 2144 } 2145 2146 if (calc_other_times_ms->sum() < -0.01) { 2147 should_print = true; 2148 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###"); 2149 } 2150 2151 if (calc_other_times_ms->avg() < -0.01) { 2152 should_print = true; 2153 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###"); 2154 } 2155 2156 if (should_print) 2157 print_summary(level, "Other(Calc)", calc_other_times_ms); 2158 } 2159 2160 void G1CollectorPolicy::print_summary(PauseSummary* summary) const { 2161 bool parallel = G1CollectedHeap::use_parallel_gc_threads(); 2162 MainBodySummary* body_summary = summary->main_body_summary(); 2163 if (summary->get_total_seq()->num() > 0) { 2164 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq()); 2165 if (body_summary != NULL) { 2166 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq()); 2167 if (parallel) { 2168 print_summary(1, "Parallel Time", body_summary->get_parallel_seq()); 2169 print_summary(2, "Update RS", body_summary->get_update_rs_seq()); 2170 print_summary(2, "Ext Root Scanning", 2171 body_summary->get_ext_root_scan_seq()); 2172 print_summary(2, "Mark Stack Scanning", 2173 body_summary->get_mark_stack_scan_seq()); 2174 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq()); 2175 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq()); 2176 print_summary(2, "Termination", body_summary->get_termination_seq()); 2177 print_summary(2, "Other", body_summary->get_parallel_other_seq()); 2178 { 2179 NumberSeq* other_parts[] = { 2180 body_summary->get_update_rs_seq(), 2181 body_summary->get_ext_root_scan_seq(), 2182 body_summary->get_mark_stack_scan_seq(), 2183 body_summary->get_scan_rs_seq(), 2184 body_summary->get_obj_copy_seq(), 2185 body_summary->get_termination_seq() 2186 }; 2187 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(), 2188 6, other_parts); 2189 check_other_times(2, body_summary->get_parallel_other_seq(), 2190 &calc_other_times_ms); 2191 } 2192 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq()); 2193 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq()); 2194 } else { 2195 print_summary(1, "Update RS", body_summary->get_update_rs_seq()); 2196 print_summary(1, "Ext Root Scanning", 2197 
body_summary->get_ext_root_scan_seq()); 2198 print_summary(1, "Mark Stack Scanning", 2199 body_summary->get_mark_stack_scan_seq()); 2200 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq()); 2201 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq()); 2202 } 2203 } 2204 print_summary(1, "Other", summary->get_other_seq()); 2205 { 2206 if (body_summary != NULL) { 2207 NumberSeq calc_other_times_ms; 2208 if (parallel) { 2209 // parallel 2210 NumberSeq* other_parts[] = { 2211 body_summary->get_satb_drain_seq(), 2212 body_summary->get_parallel_seq(), 2213 body_summary->get_clear_ct_seq() 2214 }; 2215 calc_other_times_ms = NumberSeq(summary->get_total_seq(), 2216 3, other_parts); 2217 } else { 2218 // serial 2219 NumberSeq* other_parts[] = { 2220 body_summary->get_satb_drain_seq(), 2221 body_summary->get_update_rs_seq(), 2222 body_summary->get_ext_root_scan_seq(), 2223 body_summary->get_mark_stack_scan_seq(), 2224 body_summary->get_scan_rs_seq(), 2225 body_summary->get_obj_copy_seq() 2226 }; 2227 calc_other_times_ms = NumberSeq(summary->get_total_seq(), 2228 6, other_parts); 2229 } 2230 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms); 2231 } 2232 } 2233 } else { 2234 LineBuffer(1).append_and_print_cr("none"); 2235 } 2236 LineBuffer(0).append_and_print_cr(""); 2237 } 2238 2239 void G1CollectorPolicy::print_tracing_info() const { 2240 if (TraceGen0Time) { 2241 gclog_or_tty->print_cr("ALL PAUSES"); 2242 print_summary_sd(0, "Total", _all_pause_times_ms); 2243 gclog_or_tty->print_cr(""); 2244 gclog_or_tty->print_cr(""); 2245 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num); 2246 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num); 2247 gclog_or_tty->print_cr(""); 2248 2249 gclog_or_tty->print_cr("EVACUATION PAUSES"); 2250 print_summary(_summary); 2251 2252 gclog_or_tty->print_cr("MISC"); 2253 print_summary_sd(0, "Stop World", _all_stop_world_times_ms); 2254 print_summary_sd(0, "Yields", _all_yield_times_ms); 2255 for (int i = 0; i < _aux_num; ++i) { 2256 if (_all_aux_times_ms[i].num() > 0) { 2257 char buffer[96]; 2258 sprintf(buffer, "Aux%d", i); 2259 print_summary_sd(0, buffer, &_all_aux_times_ms[i]); 2260 } 2261 } 2262 2263 size_t all_region_num = _region_num_young + _region_num_tenured; 2264 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), " 2265 "Tenured %8d (%6.2lf%%)", 2266 all_region_num, 2267 _region_num_young, 2268 (double) _region_num_young / (double) all_region_num * 100.0, 2269 _region_num_tenured, 2270 (double) _region_num_tenured / (double) all_region_num * 100.0); 2271 } 2272 if (TraceGen1Time) { 2273 if (_all_full_gc_times_ms->num() > 0) { 2274 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s", 2275 _all_full_gc_times_ms->num(), 2276 _all_full_gc_times_ms->sum() / 1000.0); 2277 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg()); 2278 gclog_or_tty->print_cr(" [std. 
dev = %8.2f ms, max = %8.2f ms]", 2279 _all_full_gc_times_ms->sd(), 2280 _all_full_gc_times_ms->maximum()); 2281 } 2282 } 2283 } 2284 2285 void G1CollectorPolicy::print_yg_surv_rate_info() const { 2286 #ifndef PRODUCT 2287 _short_lived_surv_rate_group->print_surv_rate_summary(); 2288 // add this call for any other surv rate groups 2289 #endif // PRODUCT 2290 } 2291 2292 void 2293 G1CollectorPolicy::update_region_num(bool young) { 2294 if (young) { 2295 ++_region_num_young; 2296 } else { 2297 ++_region_num_tenured; 2298 } 2299 } 2300 2301 #ifndef PRODUCT 2302 // for debugging, bit of a hack... 2303 static char* 2304 region_num_to_mbs(int length) { 2305 static char buffer[64]; 2306 double bytes = (double) (length * HeapRegion::GrainBytes); 2307 double mbs = bytes / (double) (1024 * 1024); 2308 sprintf(buffer, "%7.2lfMB", mbs); 2309 return buffer; 2310 } 2311 #endif // PRODUCT 2312 2313 size_t G1CollectorPolicy::max_regions(int purpose) { 2314 switch (purpose) { 2315 case GCAllocForSurvived: 2316 return _max_survivor_regions; 2317 case GCAllocForTenured: 2318 return REGIONS_UNLIMITED; 2319 default: 2320 ShouldNotReachHere(); 2321 return REGIONS_UNLIMITED; 2322 }; 2323 } 2324 2325 void G1CollectorPolicy::calculate_max_gc_locker_expansion() { 2326 size_t expansion_region_num = 0; 2327 if (GCLockerEdenExpansionPercent > 0) { 2328 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 2329 double expansion_region_num_d = perc * (double) _young_list_target_length; 2330 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 2331 // less than 1.0) we'll get 1. 2332 expansion_region_num = (size_t) ceil(expansion_region_num_d); 2333 } else { 2334 assert(expansion_region_num == 0, "sanity"); 2335 } 2336 _young_list_max_length = _young_list_target_length + expansion_region_num; 2337 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 2338 } 2339 2340 // Calculates survivor space parameters. 
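// (Illustrative sizing for the function below: with G1FixedSurvivorSpaceSize == 0, a young list target length of, say, 64 regions and SurvivorRatio == 8 yield _max_survivor_regions = 64 / 8 = 8; the tenuring threshold is then taken from the survivor age table, unless G1FixedTenuringThreshold is set, in which case MaxTenuringThreshold is used.)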
2341 void G1CollectorPolicy::calculate_survivors_policy() 2342 { 2343 if (G1FixedSurvivorSpaceSize == 0) { 2344 _max_survivor_regions = _young_list_target_length / SurvivorRatio; 2345 } else { 2346 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes; 2347 } 2348 2349 if (G1FixedTenuringThreshold) { 2350 _tenuring_threshold = MaxTenuringThreshold; 2351 } else { 2352 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 2353 HeapRegion::GrainWords * _max_survivor_regions); 2354 } 2355 } 2356 2357 #ifndef PRODUCT 2358 class HRSortIndexIsOKClosure: public HeapRegionClosure { 2359 CollectionSetChooser* _chooser; 2360 public: 2361 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) : 2362 _chooser(chooser) {} 2363 2364 bool doHeapRegion(HeapRegion* r) { 2365 if (!r->continuesHumongous()) { 2366 assert(_chooser->regionProperlyOrdered(r), "Ought to be."); 2367 } 2368 return false; 2369 } 2370 }; 2371 2372 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() { 2373 HRSortIndexIsOKClosure cl(_collectionSetChooser); 2374 _g1->heap_region_iterate(&cl); 2375 return true; 2376 } 2377 #endif 2378 2379 bool 2380 G1CollectorPolicy::force_initial_mark_if_outside_cycle() { 2381 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 2382 if (!during_cycle) { 2383 set_initiate_conc_mark_if_possible(); 2384 return true; 2385 } else { 2386 return false; 2387 } 2388 } 2389 2390 void 2391 G1CollectorPolicy::decide_on_conc_mark_initiation() { 2392 // We are about to decide on whether this pause will be an 2393 // initial-mark pause. 2394 2395 // First, during_initial_mark_pause() should not be already set. We 2396 // will set it here if we have to. However, it should be cleared by 2397 // the end of the pause (it's only set for the duration of an 2398 // initial-mark pause). 2399 assert(!during_initial_mark_pause(), "pre-condition"); 2400 2401 if (initiate_conc_mark_if_possible()) { 2402 // We had noticed on a previous pause that the heap occupancy has 2403 // gone over the initiating threshold and we should start a 2404 // concurrent marking cycle. So we might initiate one. 2405 2406 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 2407 if (!during_cycle) { 2408 // The concurrent marking thread is not "during a cycle", i.e., 2409 // it has completed the last one. So we can go ahead and 2410 // initiate a new cycle. 2411 2412 set_during_initial_mark_pause(); 2413 2414 // And we can now clear initiate_conc_mark_if_possible() as 2415 // we've already acted on it. 2416 clear_initiate_conc_mark_if_possible(); 2417 } else { 2418 // The concurrent marking thread is still finishing up the 2419 // previous cycle. If we start one right now the two cycles 2420 // overlap. In particular, the concurrent marking thread might 2421 // be in the process of clearing the next marking bitmap (which 2422 // we will use for the next cycle if we start one). Starting a 2423 // cycle now will be bad given that parts of the marking 2424 // information might get cleared by the marking thread. And we 2425 // cannot wait for the marking thread to finish the cycle as it 2426 // periodically yields while clearing the next marking bitmap 2427 // and, if it's in a yield point, it's waiting for us to 2428 // finish. So, at this point we will not start a cycle and we'll 2429 // let the concurrent marking thread complete the last one. 
2430 } 2431 } 2432 } 2433 2434 void 2435 G1CollectorPolicy_BestRegionsFirst:: 2436 record_collection_pause_start(double start_time_sec, size_t start_used) { 2437 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used); 2438 } 2439 2440 class KnownGarbageClosure: public HeapRegionClosure { 2441 CollectionSetChooser* _hrSorted; 2442 2443 public: 2444 KnownGarbageClosure(CollectionSetChooser* hrSorted) : 2445 _hrSorted(hrSorted) 2446 {} 2447 2448 bool doHeapRegion(HeapRegion* r) { 2449 // We only include humongous regions in collection 2450 // sets when concurrent mark shows that their contained object is 2451 // unreachable. 2452 2453 // Do we have any marking information for this region? 2454 if (r->is_marked()) { 2455 // We don't include humongous regions in collection 2456 // sets because we collect them immediately at the end of a marking 2457 // cycle. We also don't include young regions because we *must* 2458 // include them in the next collection pause. 2459 if (!r->isHumongous() && !r->is_young()) { 2460 _hrSorted->addMarkedHeapRegion(r); 2461 } 2462 } 2463 return false; 2464 } 2465 }; 2466 2467 class ParKnownGarbageHRClosure: public HeapRegionClosure { 2468 CollectionSetChooser* _hrSorted; 2469 jint _marked_regions_added; 2470 jint _chunk_size; 2471 jint _cur_chunk_idx; 2472 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end) 2473 int _worker; 2474 int _invokes; 2475 2476 void get_new_chunk() { 2477 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size); 2478 _cur_chunk_end = _cur_chunk_idx + _chunk_size; 2479 } 2480 void add_region(HeapRegion* r) { 2481 if (_cur_chunk_idx == _cur_chunk_end) { 2482 get_new_chunk(); 2483 } 2484 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition"); 2485 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r); 2486 _marked_regions_added++; 2487 _cur_chunk_idx++; 2488 } 2489 2490 public: 2491 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 2492 jint chunk_size, 2493 int worker) : 2494 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker), 2495 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0), 2496 _invokes(0) 2497 {} 2498 2499 bool doHeapRegion(HeapRegion* r) { 2500 // We only include humongous regions in collection 2501 // sets when concurrent mark shows that their contained object is 2502 // unreachable. 2503 _invokes++; 2504 2505 // Do we have any marking information for this region? 2506 if (r->is_marked()) { 2507 // We don't include humongous regions in collection 2508 // sets because we collect them immediately at the end of a marking 2509 // cycle. 2510 // We also do not include young regions in collection sets 2511 if (!r->isHumongous() && !r->is_young()) { 2512 add_region(r); 2513 } 2514 } 2515 return false; 2516 } 2517 jint marked_regions_added() { return _marked_regions_added; } 2518 int invokes() { return _invokes; } 2519 }; 2520 2521 class ParKnownGarbageTask: public AbstractGangTask { 2522 CollectionSetChooser* _hrSorted; 2523 jint _chunk_size; 2524 G1CollectedHeap* _g1; 2525 public: 2526 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) : 2527 AbstractGangTask("ParKnownGarbageTask"), 2528 _hrSorted(hrSorted), _chunk_size(chunk_size), 2529 _g1(G1CollectedHeap::heap()) 2530 {} 2531 2532 void work(int i) { 2533 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i); 2534 // Back to zero for the claim value. 
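// (Illustrative sizing: the _chunk_size handed to this closure is the WorkUnit computed in record_concurrent_mark_cleanup_end below; with, say, 2048 regions, ParallelGCThreads == 4 and OverpartitionFactor == 4 it is MAX2(2048 / (4 * 4), MinWorkUnit) = 128 regions per claimed chunk.)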
2535 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i, 2536 HeapRegion::InitialClaimValue); 2537 jint regions_added = parKnownGarbageCl.marked_regions_added(); 2538 _hrSorted->incNumMarkedHeapRegions(regions_added); 2539 if (G1PrintParCleanupStats) { 2540 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.", 2541 i, parKnownGarbageCl.invokes(), regions_added); 2542 } 2543 } 2544 }; 2545 2546 void 2547 G1CollectorPolicy_BestRegionsFirst:: 2548 record_concurrent_mark_cleanup_end(size_t freed_bytes, 2549 size_t max_live_bytes) { 2550 double start; 2551 if (G1PrintParCleanupStats) start = os::elapsedTime(); 2552 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes); 2553 2554 _collectionSetChooser->clearMarkedHeapRegions(); 2555 double clear_marked_end; 2556 if (G1PrintParCleanupStats) { 2557 clear_marked_end = os::elapsedTime(); 2558 gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.", 2559 (clear_marked_end - start)*1000.0); 2560 } 2561 if (G1CollectedHeap::use_parallel_gc_threads()) { 2562 const size_t OverpartitionFactor = 4; 2563 const size_t MinWorkUnit = 8; 2564 const size_t WorkUnit = 2565 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), 2566 MinWorkUnit); 2567 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), 2568 WorkUnit); 2569 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, 2570 (int) WorkUnit); 2571 _g1->workers()->run_task(&parKnownGarbageTask); 2572 2573 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2574 "sanity check"); 2575 } else { 2576 KnownGarbageClosure knownGarbagecl(_collectionSetChooser); 2577 _g1->heap_region_iterate(&knownGarbagecl); 2578 } 2579 double known_garbage_end; 2580 if (G1PrintParCleanupStats) { 2581 known_garbage_end = os::elapsedTime(); 2582 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.", 2583 (known_garbage_end - clear_marked_end)*1000.0); 2584 } 2585 _collectionSetChooser->sortMarkedHeapRegions(); 2586 double sort_end; 2587 if (G1PrintParCleanupStats) { 2588 sort_end = os::elapsedTime(); 2589 gclog_or_tty->print_cr(" sorting: %8.3f ms.", 2590 (sort_end - known_garbage_end)*1000.0); 2591 } 2592 2593 record_concurrent_mark_cleanup_end_work2(); 2594 double work2_end; 2595 if (G1PrintParCleanupStats) { 2596 work2_end = os::elapsedTime(); 2597 gclog_or_tty->print_cr(" work2: %8.3f ms.", 2598 (work2_end - sort_end)*1000.0); 2599 } 2600 } 2601 2602 // Add the heap region at the head of the non-incremental collection set 2603 void G1CollectorPolicy:: 2604 add_to_collection_set(HeapRegion* hr) { 2605 assert(_inc_cset_build_state == Active, "Precondition"); 2606 assert(!hr->is_young(), "non-incremental add of young region"); 2607 2608 if (_g1->mark_in_progress()) 2609 _g1->concurrent_mark()->registerCSetRegion(hr); 2610 2611 assert(!hr->in_collection_set(), "should not already be in the CSet"); 2612 hr->set_in_collection_set(true); 2613 hr->set_next_in_collection_set(_collection_set); 2614 _collection_set = hr; 2615 _collection_set_size++; 2616 _collection_set_bytes_used_before += hr->used(); 2617 _g1->register_region_with_in_cset_fast_test(hr); 2618 } 2619 2620 // Initialize the per-collection-set information 2621 void G1CollectorPolicy::start_incremental_cset_building() { 2622 assert(_inc_cset_build_state == Inactive, "Precondition"); 2623 2624 _inc_cset_head = NULL; 2625 _inc_cset_tail = NULL; 2626 _inc_cset_size = 0; 2627 _inc_cset_bytes_used_before = 0; 2628 2629 if (in_young_gc_mode()) { 2630 
_inc_cset_young_index = 0; 2631 } 2632 2633 _inc_cset_max_finger = 0; 2634 _inc_cset_recorded_young_bytes = 0; 2635 _inc_cset_recorded_rs_lengths = 0; 2636 _inc_cset_predicted_elapsed_time_ms = 0; 2637 _inc_cset_predicted_bytes_to_copy = 0; 2638 _inc_cset_build_state = Active; 2639 } 2640 2641 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { 2642 // This routine is used when: 2643 // * adding survivor regions to the incremental cset at the end of an 2644 // evacuation pause, 2645 // * adding the current allocation region to the incremental cset 2646 // when it is retired, and 2647 // * updating existing policy information for a region in the 2648 // incremental cset via young list RSet sampling. 2649 // Therefore this routine may be called at a safepoint by the 2650 // VM thread, or in-between safepoints by mutator threads (when 2651 // retiring the current allocation region) or a concurrent 2652 // refine thread (RSet sampling). 2653 2654 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true); 2655 size_t used_bytes = hr->used(); 2656 2657 _inc_cset_recorded_rs_lengths += rs_length; 2658 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; 2659 2660 _inc_cset_bytes_used_before += used_bytes; 2661 2662 // Cache the values we have added to the aggregated information 2663 // in the heap region in case we have to remove this region from 2664 // the incremental collection set, or it is updated by the 2665 // rset sampling code 2666 hr->set_recorded_rs_length(rs_length); 2667 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); 2668 2669 #if PREDICTIONS_VERBOSE 2670 size_t bytes_to_copy = predict_bytes_to_copy(hr); 2671 _inc_cset_predicted_bytes_to_copy += bytes_to_copy; 2672 2673 // Record the number of bytes used in this region 2674 _inc_cset_recorded_young_bytes += used_bytes; 2675 2676 // Cache the values we have added to the aggregated information 2677 // in the heap region in case we have to remove this region from 2678 // the incremental collection set, or it is updated by the 2679 // rset sampling code 2680 hr->set_predicted_bytes_to_copy(bytes_to_copy); 2681 #endif // PREDICTIONS_VERBOSE 2682 } 2683 2684 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) { 2685 // This routine is currently only called as part of the updating of 2686 // existing policy information for regions in the incremental cset that 2687 // is performed by the concurrent refine thread(s) as part of young list 2688 // RSet sampling. Therefore we should not be at a safepoint. 2689 2690 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint"); 2691 assert(hr->is_young(), "it should be"); 2692 2693 size_t used_bytes = hr->used(); 2694 size_t old_rs_length = hr->recorded_rs_length(); 2695 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); 2696 2697 // Subtract the old recorded/predicted policy information for 2698 // the given heap region from the collection set info.
2699 _inc_cset_recorded_rs_lengths -= old_rs_length; 2700 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms; 2701 2702 _inc_cset_bytes_used_before -= used_bytes; 2703 2704 // Clear the values cached in the heap region 2705 hr->set_recorded_rs_length(0); 2706 hr->set_predicted_elapsed_time_ms(0); 2707 2708 #if PREDICTIONS_VERBOSE 2709 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy(); 2710 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy; 2711 2712 // Subtract the number of bytes used in this region 2713 _inc_cset_recorded_young_bytes -= used_bytes; 2714 2715 // Clear the values cached in the heap region 2716 hr->set_predicted_bytes_to_copy(0); 2717 #endif // PREDICTIONS_VERBOSE 2718 } 2719 2720 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) { 2721 // Update the collection set information that is dependent on the new RS length 2722 assert(hr->is_young(), "Precondition"); 2723 2724 remove_from_incremental_cset_info(hr); 2725 add_to_incremental_cset_info(hr, new_rs_length); 2726 } 2727 2728 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { 2729 assert( hr->is_young(), "invariant"); 2730 assert( hr->young_index_in_cset() == -1, "invariant" ); 2731 assert(_inc_cset_build_state == Active, "Precondition"); 2732 2733 // We need to clear and set the cached recorded/cached collection set 2734 // information in the heap region here (before the region gets added 2735 // to the collection set). An individual heap region's cached values 2736 // are calculated, aggregated with the policy collection set info, 2737 // and cached in the heap region here (initially) and (subsequently) 2738 // by the Young List sampling code. 2739 2740 size_t rs_length = hr->rem_set()->occupied(); 2741 add_to_incremental_cset_info(hr, rs_length); 2742 2743 HeapWord* hr_end = hr->end(); 2744 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end); 2745 2746 assert(!hr->in_collection_set(), "invariant"); 2747 hr->set_in_collection_set(true); 2748 assert( hr->next_in_collection_set() == NULL, "invariant"); 2749 2750 _inc_cset_size++; 2751 _g1->register_region_with_in_cset_fast_test(hr); 2752 2753 hr->set_young_index_in_cset((int) _inc_cset_young_index); 2754 ++_inc_cset_young_index; 2755 } 2756 2757 // Add the region at the RHS of the incremental cset 2758 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { 2759 // We should only ever be appending survivors at the end of a pause 2760 assert( hr->is_survivor(), "Logic"); 2761 2762 // Do the 'common' stuff 2763 add_region_to_incremental_cset_common(hr); 2764 2765 // Now add the region at the right hand side 2766 if (_inc_cset_tail == NULL) { 2767 assert(_inc_cset_head == NULL, "invariant"); 2768 _inc_cset_head = hr; 2769 } else { 2770 _inc_cset_tail->set_next_in_collection_set(hr); 2771 } 2772 _inc_cset_tail = hr; 2773 } 2774 2775 // Add the region to the LHS of the incremental cset 2776 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { 2777 // Survivors should be added to the RHS at the end of a pause 2778 assert(!hr->is_survivor(), "Logic"); 2779 2780 // Do the 'common' stuff 2781 add_region_to_incremental_cset_common(hr); 2782 2783 // Add the region at the left hand side 2784 hr->set_next_in_collection_set(_inc_cset_head); 2785 if (_inc_cset_head == NULL) { 2786 assert(_inc_cset_tail == NULL, "Invariant"); 2787 _inc_cset_tail = hr; 2788 } 2789 _inc_cset_head = hr; 2790 } 2791 2792 #ifndef PRODUCT 2793 void 
G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { 2794 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); 2795 2796 st->print_cr("\nCollection_set:"); 2797 HeapRegion* csr = list_head; 2798 while (csr != NULL) { 2799 HeapRegion* next = csr->next_in_collection_set(); 2800 assert(csr->in_collection_set(), "bad CS"); 2801 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " 2802 "age: %4d, y: %d, surv: %d", 2803 csr->bottom(), csr->end(), 2804 csr->top(), 2805 csr->prev_top_at_mark_start(), 2806 csr->next_top_at_mark_start(), 2807 csr->top_at_conc_mark_count(), 2808 csr->age_in_surv_rate_group_cond(), 2809 csr->is_young(), 2810 csr->is_survivor()); 2811 csr = next; 2812 } 2813 } 2814 #endif // !PRODUCT 2815 2816 void 2817 G1CollectorPolicy_BestRegionsFirst::choose_collection_set( 2818 double target_pause_time_ms) { 2819 // Set this here - in case we're not doing young collections. 2820 double non_young_start_time_sec = os::elapsedTime(); 2821 2822 start_recording_regions(); 2823 2824 guarantee(target_pause_time_ms > 0.0, 2825 err_msg("target_pause_time_ms = %1.6lf should be positive", 2826 target_pause_time_ms)); 2827 guarantee(_collection_set == NULL, "Precondition"); 2828 2829 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); 2830 double predicted_pause_time_ms = base_time_ms; 2831 2832 double time_remaining_ms = target_pause_time_ms - base_time_ms; 2833 2834 // the 10% and 50% values are arbitrary... 2835 if (time_remaining_ms < 0.10 * target_pause_time_ms) { 2836 time_remaining_ms = 0.50 * target_pause_time_ms; 2837 _within_target = false; 2838 } else { 2839 _within_target = true; 2840 } 2841 2842 // We figure out the number of bytes available for future to-space. 2843 // For new regions without marking information, we must assume the 2844 // worst-case of complete survival. If we have marking information for a 2845 // region, we can bound the amount of live data. We can add a number of 2846 // such regions, as long as the sum of the live data bounds does not 2847 // exceed the available evacuation space. 2848 size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes; 2849 2850 size_t expansion_bytes = 2851 _g1->expansion_regions() * HeapRegion::GrainBytes; 2852 2853 _collection_set_bytes_used_before = 0; 2854 _collection_set_size = 0; 2855 2856 // Adjust for expansion and slop. 2857 max_live_bytes = max_live_bytes + expansion_bytes; 2858 2859 HeapRegion* hr; 2860 if (in_young_gc_mode()) { 2861 double young_start_time_sec = os::elapsedTime(); 2862 2863 if (G1PolicyVerbose > 0) { 2864 gclog_or_tty->print_cr("Adding %d young regions to the CSet", 2865 _g1->young_list()->length()); 2866 } 2867 2868 _young_cset_length = 0; 2869 _last_young_gc_full = full_young_gcs() ? true : false; 2870 2871 if (_last_young_gc_full) 2872 ++_full_young_pause_num; 2873 else 2874 ++_partial_young_pause_num; 2875 2876 // The young list is laid out such that the survivor regions from the previous 2877 // pause are appended to the RHS of the young list, i.e. 2878 // [Newly Young Regions ++ Survivors from last pause]. 2879 2880 hr = _g1->young_list()->first_survivor_region(); 2881 while (hr != NULL) { 2882 assert(hr->is_survivor(), "badly formed young list"); 2883 hr->set_young(); 2884 hr = hr->get_next_young_region(); 2885 } 2886 2887 // Clear the fields that point to the survivor list - they are 2888 // all young now.
2889 _g1->young_list()->clear_survivors(); 2890 2891 if (_g1->mark_in_progress()) 2892 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger); 2893 2894 _young_cset_length = _inc_cset_young_index; 2895 _collection_set = _inc_cset_head; 2896 _collection_set_size = _inc_cset_size; 2897 _collection_set_bytes_used_before = _inc_cset_bytes_used_before; 2898 2899 // For young regions in the collection set, we assume the worst 2900 // case of complete survival 2901 max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes; 2902 2903 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms; 2904 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms; 2905 2906 // The number of recorded young regions is the incremental 2907 // collection set's current size 2908 set_recorded_young_regions(_inc_cset_size); 2909 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); 2910 set_recorded_young_bytes(_inc_cset_recorded_young_bytes); 2911 #if PREDICTIONS_VERBOSE 2912 set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy); 2913 #endif // PREDICTIONS_VERBOSE 2914 2915 if (G1PolicyVerbose > 0) { 2916 gclog_or_tty->print_cr(" Added " PTR_FORMAT " Young Regions to CS.", 2917 _inc_cset_size); 2918 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)", 2919 max_live_bytes/K); 2920 } 2921 2922 assert(_inc_cset_size == _g1->young_list()->length(), "Invariant"); 2923 2924 double young_end_time_sec = os::elapsedTime(); 2925 _recorded_young_cset_choice_time_ms = 2926 (young_end_time_sec - young_start_time_sec) * 1000.0; 2927 2928 // We are doing young collections so reset this. 2929 non_young_start_time_sec = young_end_time_sec; 2930 2931 // Note we can use either _collection_set_size or 2932 // _young_cset_length here 2933 if (_collection_set_size > 0 && _last_young_gc_full) { 2934 // don't bother adding more regions... 2935 goto choose_collection_set_end; 2936 } 2937 } 2938 2939 if (!in_young_gc_mode() || !full_young_gcs()) { 2940 bool should_continue = true; 2941 NumberSeq seq; 2942 double avg_prediction = 100000000000000000.0; // something very large 2943 2944 do { 2945 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms, 2946 avg_prediction); 2947 if (hr != NULL) { 2948 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false); 2949 time_remaining_ms -= predicted_time_ms; 2950 predicted_pause_time_ms += predicted_time_ms; 2951 add_to_collection_set(hr); 2952 record_non_young_cset_region(hr); 2953 max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes); 2954 if (G1PolicyVerbose > 0) { 2955 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)", 2956 max_live_bytes/K); 2957 } 2958 seq.add(predicted_time_ms); 2959 avg_prediction = seq.avg() + seq.sd(); 2960 } 2961 should_continue = 2962 ( hr != NULL) && 2963 ( (adaptive_young_list_length()) ? 
time_remaining_ms > 0.0 2964 : _collection_set_size < _young_list_fixed_length ); 2965 } while (should_continue); 2966 2967 if (!adaptive_young_list_length() && 2968 _collection_set_size < _young_list_fixed_length) 2969 _should_revert_to_full_young_gcs = true; 2970 } 2971 2972 choose_collection_set_end: 2973 stop_incremental_cset_building(); 2974 2975 count_CS_bytes_used(); 2976 2977 end_recording_regions(); 2978 2979 double non_young_end_time_sec = os::elapsedTime(); 2980 _recorded_non_young_cset_choice_time_ms = 2981 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0; 2982 } 2983 2984 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() { 2985 G1CollectorPolicy::record_full_collection_end(); 2986 _collectionSetChooser->updateAfterFullCollection(); 2987 } 2988 2989 void G1CollectorPolicy_BestRegionsFirst:: 2990 expand_if_possible(size_t numRegions) { 2991 size_t expansion_bytes = numRegions * HeapRegion::GrainBytes; 2992 _g1->expand(expansion_bytes); 2993 } 2994 2995 void G1CollectorPolicy_BestRegionsFirst:: 2996 record_collection_pause_end() { 2997 G1CollectorPolicy::record_collection_pause_end(); 2998 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end."); 2999 }