/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different numbers of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>

// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add
                         // to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
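// A typical use of LineBuffer, as in print_par_stats() below: build up one
// log line with repeated append() calls, then emit it in a single print so
// that lines from concurrent threads cannot interleave mid-line:
//
//   LineBuffer buf(level);
//   buf.append("[%s (ms):", str);
//   for (uint i = 0; i < ParallelGCThreads; ++i) {
//     buf.append(" %3.1lf", data[i]);
//   }
//   buf.append_and_print_cr("");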
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                       ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _in_young_gc_mode(false),
  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast  QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0)

{
  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  // Verify PLAB sizes
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ?
"Old" : "Young", region_size); 295 vm_exit_during_initialization(buffer); 296 } 297 298 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 299 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 300 301 _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads]; 302 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads]; 303 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads]; 304 305 _par_last_update_rs_times_ms = new double[_parallel_gc_threads]; 306 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads]; 307 308 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads]; 309 310 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads]; 311 312 _par_last_termination_times_ms = new double[_parallel_gc_threads]; 313 _par_last_termination_attempts = new double[_parallel_gc_threads]; 314 _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads]; 315 _par_last_gc_worker_times_ms = new double[_parallel_gc_threads]; 316 317 // start conservatively 318 _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; 319 320 // <NEW PREDICTION> 321 322 int index; 323 if (ParallelGCThreads == 0) 324 index = 0; 325 else if (ParallelGCThreads > 8) 326 index = 7; 327 else 328 index = ParallelGCThreads - 1; 329 330 _pending_card_diff_seq->add(0.0); 331 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 332 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 333 _fully_young_cards_per_entry_ratio_seq->add( 334 fully_young_cards_per_entry_ratio_defaults[index]); 335 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 336 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 337 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 338 _young_other_cost_per_region_ms_seq->add( 339 young_other_cost_per_region_ms_defaults[index]); 340 _non_young_other_cost_per_region_ms_seq->add( 341 non_young_other_cost_per_region_ms_defaults[index]); 342 343 // </NEW PREDICTION> 344 345 // Below, we might need to calculate the pause time target based on 346 // the pause interval. When we do so we are going to give G1 maximum 347 // flexibility and allow it to do pauses when it needs to. So, we'll 348 // arrange that the pause interval to be pause time target + 1 to 349 // ensure that a) the pause time target is maximized with respect to 350 // the pause interval and b) we maintain the invariant that pause 351 // time target < pause interval. If the user does not want this 352 // maximum flexibility, they will have to set the pause interval 353 // explicitly. 354 355 // First make sure that, if either parameter is set, its value is 356 // reasonable. 357 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 358 if (MaxGCPauseMillis < 1) { 359 vm_exit_during_initialization("MaxGCPauseMillis should be " 360 "greater than 0"); 361 } 362 } 363 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 364 if (GCPauseIntervalMillis < 1) { 365 vm_exit_during_initialization("GCPauseIntervalMillis should be " 366 "greater than 0"); 367 } 368 } 369 370 // Then, if the pause time target parameter was not set, set it to 371 // the default value. 
  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_init_times_ms->add(0.05);
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  // If G1FixedSurvivorSpaceSize is 0 (which means the size is not
  // fixed), then _max_survivor_regions will be calculated by
  // calculate_young_list_target_length() during initialization.
  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  initialize_all();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
  size_t size_to_region_num(size_t byte_size) {
    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
  }

public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }

  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (G1Gen) {
    _in_young_gc_mode = true;

    G1YoungGenSizer sizer;
    size_t initial_region_num = sizer.initial_young_region_num();

    if (UseAdaptiveSizePolicy) {
      set_adaptive_young_list_length(true);
      _young_list_fixed_length = 0;
    } else {
      set_adaptive_young_list_length(false);
      _young_list_fixed_length = initial_region_num;
    }
    _free_regions_at_end_of_collection = _g1->free_regions();
    calculate_young_list_min_length();
    guarantee(_young_list_min_length == 0, "invariant, not enough info");
    calculate_young_list_target_length();
  } else {
    _young_list_fixed_length = 0;
    _in_young_gc_mode = false;
  }

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters()
{
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
}

void G1CollectorPolicy::calculate_young_list_min_length() {
  _young_list_min_length = 0;

  if (!adaptive_young_list_length())
    return;

  if (_alloc_rate_ms_seq->num() > 3) {
    double now_sec = os::elapsedTime();
    double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
    double alloc_rate_ms = predict_alloc_rate_ms();
    size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
    size_t current_region_num = _g1->young_list()->length();
    _young_list_min_length = min_regions + current_region_num;
  }
}
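// Worked example for the calculation above (illustrative numbers, not taken
// from the code): if the predicted allocation rate is 0.2 regions/ms and the
// MMU tracker says the next pause cannot start for another 50ms, we expect
// ceil(0.2 * 50) = 10 more regions to be allocated by then, so the minimum
// young length is 10 plus however many regions are in the young list now.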
void G1CollectorPolicy::calculate_young_list_target_length() {
  if (adaptive_young_list_length()) {
    size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
    calculate_young_list_target_length(rs_lengths);
  } else {
    if (full_young_gcs())
      _young_list_target_length = _young_list_fixed_length;
    else
      _young_list_target_length = _young_list_fixed_length / 2;
  }

  // Make sure we allow the application to allocate at least one
  // region before we need to do a collection again.
  size_t min_length = _g1->young_list()->length() + 1;
  _young_list_target_length = MAX2(_young_list_target_length, min_length);
  calculate_max_gc_locker_expansion();
  calculate_survivors_policy();
}

void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
  guarantee(adaptive_young_list_length(), "pre-condition");
  guarantee(!_in_marking_window || !_last_full_young_gc, "invariant");

  double start_time_sec = os::elapsedTime();
  size_t min_reserve_perc = MAX2((size_t) 2, (size_t) G1ReservePercent);
  min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
  size_t reserve_regions =
    (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);

  if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
    // we are in fully-young mode and there are free regions in the heap

    double survivor_regions_evac_time =
        predict_survivor_regions_evac_time();

    double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
    size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
    size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
    size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
    double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
                          + survivor_regions_evac_time;

    // the result
    size_t final_young_length = 0;

    size_t init_free_regions =
      MAX2((size_t) 0, _free_regions_at_end_of_collection - reserve_regions);

    // if we're still under the pause target...
    if (base_time_ms <= target_pause_time_ms) {
      // We make sure that the shortest young length that makes sense
      // fits within the target pause time.
      size_t min_young_length = 1;

      if (predict_will_fit(min_young_length, base_time_ms,
                           init_free_regions, target_pause_time_ms)) {
        // The shortest young length will fit within the target pause time;
        // we'll now check whether the absolute maximum number of young
        // regions will fit in the target pause time. If not, we'll do
        // a binary search between min_young_length and max_young_length.
        size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
        size_t max_young_length = abs_max_young_length;

        if (max_young_length > min_young_length) {
          // Let's check if the initial max young length will fit within the
          // target pause. If so then there is no need to search for a maximal
          // young length - we'll return the initial maximum.

          if (predict_will_fit(max_young_length, base_time_ms,
                               init_free_regions, target_pause_time_ms)) {
            // The maximum young length will satisfy the target pause time.
            // We are done so set min young length to this maximum length.
            // The code after the loop will then set final_young_length using
            // the value cached in the minimum length.
            min_young_length = max_young_length;
          } else {
            // The maximum possible number of young regions will not fit within
            // the target pause time so let's search....

            size_t diff = (max_young_length - min_young_length) / 2;
            max_young_length = min_young_length + diff;

            while (max_young_length > min_young_length) {
              if (predict_will_fit(max_young_length, base_time_ms,
                                   init_free_regions, target_pause_time_ms)) {

                // The current max young length will fit within the target
                // pause time. Note we do not exit the loop here. By setting
                // min = max, and then increasing the max below, we will
                // continue searching for an upper bound in the
                // range [max..max+diff].
                min_young_length = max_young_length;
              }
              diff = (max_young_length - min_young_length) / 2;
              max_young_length = min_young_length + diff;
            }
            // the above loop found a maximal young length that will fit
            // within the target pause time.
          }
          assert(min_young_length <= abs_max_young_length, "just checking");
        }
        final_young_length = min_young_length;
      }
    }
    // and we're done!

    // we should have at least one region in the target young length
    _young_list_target_length =
                              final_young_length + _recorded_survivor_regions;

    // let's keep an eye on how long we spend on this calculation
    // right now, I assume that we'll print it when we need it; we
    // should really add it to the breakdown of a pause
    double end_time_sec = os::elapsedTime();
    double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;

#ifdef TRACE_CALC_YOUNG_LENGTH
    // leave this in for debugging, just in case
    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
                           "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT " " SIZE_FORMAT,
                           target_pause_time_ms,
                           _young_list_target_length,
                           elapsed_time_ms,
                           full_young_gcs() ? "full" : "partial",
                           during_initial_mark_pause() ? " i-m" : "",
                           _in_marking_window,
                           _in_marking_window_im);
#endif // TRACE_CALC_YOUNG_LENGTH

    if (_young_list_target_length < _young_list_min_length) {
      // bummer; this means that, if we do a pause when the maximal
      // length dictates, we'll violate the pause spacing target (the
      // min length was calculated based on the application's current
      // alloc rate);

      // so, we have to bite the bullet, and allocate the minimum
      // number. We'll violate our target, but we just can't meet it.

#ifdef TRACE_CALC_YOUNG_LENGTH
      // leave this in for debugging, just in case
      gclog_or_tty->print_cr("adjusted target length from "
                             SIZE_FORMAT " to " SIZE_FORMAT,
                             _young_list_target_length, _young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH

      _young_list_target_length = _young_list_min_length;
    }
  } else {
    // we are in a partially-young mode or we've run out of regions (due
    // to evacuation failure)

#ifdef TRACE_CALC_YOUNG_LENGTH
    // leave this in for debugging, just in case
    gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT,
                           _young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH
    // we'll do the pause as soon as possible by choosing the minimum
    _young_list_target_length = _young_list_min_length;
  }

  _rs_lengths_prediction = rs_lengths;
}
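// A note on the binary search in the function above: it maintains
// min_young_length as the largest length known to fit within the pause
// target and max_young_length as the current probe. Because diff is
// recomputed as half the remaining gap on every iteration, the gap shrinks
// monotonically and the loop terminates with min_young_length holding
// (approximately) the largest young length whose predicted pause still fits.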
// This is used by: calculate_young_list_target_length(rs_length). It
// returns true iff:
//   the predicted pause time for the given young list will not overflow
//   the target pause time
// and:
//   the predicted amount of surviving data will not overflow the
//   amount of free space available for survivor regions.
//
bool
G1CollectorPolicy::predict_will_fit(size_t young_length,
                                    double base_time_ms,
                                    size_t init_free_regions,
                                    double target_pause_time_ms) {

  if (young_length >= init_free_regions)
    // end condition 1: not enough space for the young regions
    return false;

  double accum_surv_rate_adj = 0.0;
  double accum_surv_rate =
    accum_yg_surv_rate_pred((int) (young_length - 1)) - accum_surv_rate_adj;

  size_t bytes_to_copy =
    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);

  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);

  double young_other_time_ms =
                       predict_young_other_time_ms(young_length);

  double pause_time_ms =
                   base_time_ms + copy_time_ms + young_other_time_ms;

  if (pause_time_ms > target_pause_time_ms)
    // end condition 2: over the target pause time
    return false;

  size_t free_bytes =
                 (init_free_regions - young_length) * HeapRegion::GrainBytes;

  if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
    // end condition 3: out of to-space (conservatively)
    return false;

  // success!
  return true;
}
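// Illustrative arithmetic for the checks above (numbers are made up): with
// base_time_ms = 60, a predicted copy cost of 30ms and a young-other cost
// of 5ms, pause_time_ms = 95, which passes a 100ms target (end condition 2).
// The candidate length is then only accepted if roughly twice the predicted
// survivor volume (the 2.0 + sigma() factor) still fits in the remaining
// free regions (end condition 3).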
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::check_prediction_validity() {
  guarantee(adaptive_young_list_length(), "should not call this otherwise");

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    calculate_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}


#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee(surv_rate_group != NULL, "pre-condition");

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young   = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  calculate_young_list_min_length();
  calculate_young_list_target_length();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    if (in_young_gc_mode())
      gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  if (in_young_gc_mode())
    _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(verify_young_ages(), "region age verification");
}

void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_start() {
  _mark_init_start_sec = os::elapsedTime();
  guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
}

void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
  _concurrent_mark_init_times_ms->add(elapsed_time_ms);
  record_concurrent_mark_init_end_pre(elapsed_time_ms);

  _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}

void
G1CollectorPolicy::
record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                         size_t max_live_bytes) {
  if (_n_marks < 2) _n_marks++;
  if (G1PolicyVerbose > 0)
    gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
                           " (of " SIZE_FORMAT " MB heap).",
                           max_live_bytes/M, _g1->capacity()/M);
}
// The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);

  _num_markings++;

  // We did a marking, so reset the "since_last_mark" variables.
  double considerConcMarkCost = 1.0;
  // If there are available processors, concurrent activity is free...
  if (Threads::number_of_non_daemon_threads() * 2 <
      os::active_processor_count()) {
    considerConcMarkCost = 0.0;
  }
  _n_pauses_at_mark_end = _n_pauses;
  _n_marks_since_last_pause++;
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  if (in_young_gc_mode()) {
    _should_revert_to_full_young_gcs = false;
    _last_full_young_gc = true;
    _in_marking_window = false;
    if (adaptive_young_list_length())
      calculate_young_list_target_length();
  }
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}

template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}

void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
                          avg, min, max, max - min);
}

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
                          (int) total, (int) avg, (int) min, (int) max,
                          (int) max - (int) min);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}

double G1CollectorPolicy::avg_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < ParallelGCThreads; ++i)
      ret += data[i];
    return ret / (double) ParallelGCThreads;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < ParallelGCThreads; ++i)
      if (data[i] > ret)
        ret = data[i];
    return ret;
  } else {
    return data[0];
  }
}
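// avg_value()/max_value() above and sum_of_values()/max_sum() below all
// summarize the per-worker _par_last_* arrays filled in during a pause;
// record_collection_pause_end() uses them to turn per-thread phase timings
// into the single numbers fed to the prediction sequences.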
double G1CollectorPolicy::sum_of_values(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < ParallelGCThreads; i++)
      sum += data[i];
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum(double* data1,
                                  double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < ParallelGCThreads; ++i) {
      double data = data1[i] + data2[i];
      if (data > ret)
        ret = data;
    }
  }
  return ret;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
void G1CollectorPolicy::record_collection_pause_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_ms = _last_pause_time_ms;
  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  size_t rs_size =
    _cur_collection_pause_used_regions_at_start - collection_set_size();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  if (in_young_gc_mode()) {
    last_pause_included_initial_mark = during_initial_mark_pause();
    if (last_pause_included_initial_mark)
      record_concurrent_mark_init_end_pre(0.0);

    size_t min_used_targ =
      (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;


    if (!_g1->mark_in_progress() && !_last_full_young_gc) {
      assert(!last_pause_included_initial_mark, "invariant");
      if (cur_used_bytes > min_used_targ &&
          cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
        assert(!during_initial_mark_pause(), "we should not see this here");

        // Note: this might have already been set, if during the last
        // pause we decided to start a cycle but at the beginning of
        // this pause we decided to postpone it. That's OK.
        set_initiate_conc_mark_if_possible();
      }
    }

    _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
  }

  _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                          end_time_sec, false);

  guarantee(_cur_collection_pause_used_regions_at_start >=
            collection_set_size(),
            "Negative RS size?");

  // This assert is exempted when we're doing parallel collection pauses,
  // because the fragmentation caused by the parallel GC allocation buffers
  // can lead to more memory being used during collection than was used
  // before. Best leave this out until the fragmentation problem is fixed.
  // Pauses in which evacuation failed can also lead to negative
  // collections, since no space is reclaimed from a region containing an
  // object whose evacuation failed.
  // Further, we're now always doing parallel collection. But I'm still
  // leaving this here as a placeholder for a more precise assertion later.
  // (DLD, 10/05.)
  assert((true || parallel) // Always using GC LABs now.
         || _g1->evacuation_failed()
         || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
         "Negative collection");

  size_t freed_bytes =
    _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;

  double survival_fraction =
    (double) surviving_bytes /
    (double) _collection_set_bytes_used_before;

  _n_pauses++;

  double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  double update_rs_processed_buffers =
    sum_of_values(_par_last_update_rs_processed_buffers);
  double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  double termination_time = avg_value(_par_last_termination_times_ms);

  double parallel_known_time = update_rs_time +
                               ext_root_scan_time +
                               mark_stack_scan_time +
                               scan_rs_time +
                               obj_copy_time +
                               termination_time;

  double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;

  PauseSummary* summary = _summary;

  if (update_stats) {
    _recent_rs_scan_times_ms->add(scan_rs_time);
    _recent_pause_times_ms->add(elapsed_ms);
    _recent_rs_sizes->add(rs_size);

    MainBodySummary* body_summary = summary->main_body_summary();
    guarantee(body_summary != NULL, "should not be null!");

    if (_satb_drain_time_set)
      body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
    else
      body_summary->record_satb_drain_time_ms(0.0);

    body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
    body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
    body_summary->record_update_rs_time_ms(update_rs_time);
    body_summary->record_scan_rs_time_ms(scan_rs_time);
    body_summary->record_obj_copy_time_ms(obj_copy_time);
    if (parallel) {
      body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
      body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
      body_summary->record_termination_time_ms(termination_time);
      body_summary->record_parallel_other_time_ms(parallel_other_time);
    }
    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);

    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections. Same with evac
    // failure.
    // Further, we're now always doing parallel collection. But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    assert((true || parallel)
           || _g1->evacuation_failed()
           || surviving_bytes <= _collection_set_bytes_used_before,
           "Or else negative collection!");
    _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
    _recent_CS_bytes_surviving->add(surviving_bytes);

    // this is where we update the allocation rate of the application
    double app_time_ms =
      (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
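    // Note the units below: the allocation rate is regions allocated per
    // millisecond of mutator (non-GC) time; this is the value that
    // predict_alloc_rate_ms() later feeds into
    // calculate_young_list_min_length().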
    size_t regions_allocated =
      (_region_num_young - _prev_region_num_young) +
      (_region_num_tenured - _prev_region_num_tenured);
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);
    _prev_region_num_young   = _region_num_young;
    _prev_region_num_tenured = _region_num_tenured;

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, elapsed_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms,
                             recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }

  if (G1PolicyVerbose > 1) {
    gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
  }

  if (G1PolicyVerbose > 1) {
    gclog_or_tty->print_cr("      ET: %10.6f ms    (avg: %10.6f ms)\n"
                           "      ET-RS: %10.6f ms (avg: %10.6f ms)\n"
                           "      |RS|: " SIZE_FORMAT,
                           elapsed_ms, recent_avg_time_for_pauses_ms(),
                           scan_rs_time, recent_avg_time_for_rs_scan_ms(),
                           rs_size);

    gclog_or_tty->print_cr("      Used at start: " SIZE_FORMAT"K"
                           "  At end " SIZE_FORMAT "K\n"
                           "      garbage  : " SIZE_FORMAT "K"
                           "  of " SIZE_FORMAT "K\n"
                           "      survival : %6.2f%% (%6.2f%% avg)",
                           _cur_collection_pause_used_at_start_bytes/K,
                           _g1->used()/K, freed_bytes/K,
                           _collection_set_bytes_used_before/K,
                           survival_fraction*100.0,
                           recent_avg_survival_fraction()*100.0);
    gclog_or_tty->print_cr("   Recent %% gc pause time: %6.2f",
                           recent_avg_pause_time_ratio() * 100.0);
  }

  double other_time_ms = elapsed_ms;

  if (_satb_drain_time_set) {
    other_time_ms -= _cur_satb_drain_time_ms;
  }

  if (parallel) {
    other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
  } else {
    other_time_ms -=
      update_rs_time +
      ext_root_scan_time + mark_stack_scan_time +
      scan_rs_time + obj_copy_time;
  }
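  // At this point other_time_ms is the slice of the pause not accounted for
  // by the measured phases above (parallel work plus card-table clearing in
  // the parallel case, or the individual serial phases otherwise); it is
  // what gets printed as "Other" below.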
" (initial-mark)" : "", 1388 elapsed_ms / 1000.0); 1389 1390 if (_satb_drain_time_set) { 1391 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms); 1392 } 1393 if (_last_satb_drain_processed_buffers >= 0) { 1394 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers); 1395 } 1396 if (parallel) { 1397 print_stats(1, "Parallel Time", _cur_collection_par_time_ms); 1398 print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms); 1399 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); 1400 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers); 1401 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); 1402 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); 1403 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); 1404 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); 1405 print_par_stats(2, "Termination", _par_last_termination_times_ms); 1406 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts); 1407 print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms); 1408 1409 for (int i = 0; i < _parallel_gc_threads; i++) { 1410 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i]; 1411 } 1412 print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms); 1413 1414 print_stats(2, "Parallel Other", parallel_other_time); 1415 print_stats(1, "Clear CT", _cur_clear_ct_time_ms); 1416 } else { 1417 print_stats(1, "Update RS", update_rs_time); 1418 print_stats(2, "Processed Buffers", 1419 (int)update_rs_processed_buffers); 1420 print_stats(1, "Ext Root Scanning", ext_root_scan_time); 1421 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time); 1422 print_stats(1, "Scan RS", scan_rs_time); 1423 print_stats(1, "Object Copying", obj_copy_time); 1424 } 1425 #ifndef PRODUCT 1426 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms); 1427 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms); 1428 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms); 1429 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms); 1430 if (_num_cc_clears > 0) { 1431 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears)); 1432 } 1433 #endif 1434 print_stats(1, "Other", other_time_ms); 1435 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms); 1436 print_stats(2, "Ref Proc", _cur_ref_proc_time_ms); 1437 print_stats(2, "Ref Enq", _cur_ref_enq_time_ms); 1438 1439 for (int i = 0; i < _aux_num; ++i) { 1440 if (_cur_aux_times_set[i]) { 1441 char buffer[96]; 1442 sprintf(buffer, "Aux%d", i); 1443 print_stats(1, buffer, _cur_aux_times_ms[i]); 1444 } 1445 } 1446 } 1447 1448 _all_pause_times_ms->add(elapsed_ms); 1449 if (update_stats) { 1450 summary->record_total_time_ms(elapsed_ms); 1451 summary->record_other_time_ms(other_time_ms); 1452 } 1453 for (int i = 0; i < _aux_num; ++i) 1454 if (_cur_aux_times_set[i]) 1455 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]); 1456 1457 // Reset marks-between-pauses counter. 1458 _n_marks_since_last_pause = 0; 1459 1460 // Update the efficiency-since-mark vars. 1461 double proc_ms = elapsed_ms * (double) _parallel_gc_threads; 1462 if (elapsed_ms < MIN_TIMER_GRANULARITY) { 1463 // This usually happens due to the timer not having the required 1464 // granularity. Some Linuxes are the usual culprits. 1465 // We'll just set it to something (arbitrarily) small. 
  // Update the efficiency-since-mark vars.
  double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  if (elapsed_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    proc_ms = 1.0;
  }
  double cur_efficiency = (double) freed_bytes / proc_ms;

  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (during_initial_mark_pause()) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (in_young_gc_mode()) {
    if (_last_full_young_gc) {
      set_full_young_gcs(false);
      _last_full_young_gc = false;
    }

    if (!_last_young_gc_full) {
      if (_should_revert_to_full_young_gcs ||
          _known_garbage_ratio < 0.05 ||
          (adaptive_young_list_length() &&
           (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff()))) {
        set_full_young_gcs(true);
      }
    }
    _should_revert_to_full_young_gcs = false;

    if (_last_young_gc_full && !_during_marking)
      _young_gc_eff_seq->add(cur_efficiency);
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  // <NEW PREDICTION>

  if (update_stats) {
    double pause_time_ms = elapsed_ms;

    size_t diff = 0;
    if (_max_pending_cards >= _pending_cards)
      diff = _max_pending_cards - _pending_cards;
    _pending_card_diff_seq->add((double) diff);

    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = update_rs_time / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
      if (_last_young_gc_full)
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      else
        _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_young_gc_full)
        _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      else
        _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
    }

    size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    if (rs_length_diff >= 0)
      _rs_length_diff_seq->add((double) rs_length_diff);

    size_t copied_bytes = surviving_bytes;
    double cost_per_byte_ms = 0.0;
    if (copied_bytes > 0) {
      cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
      if (_in_marking_window)
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      else
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
    }

    double all_other_time_ms = pause_time_ms -
      (update_rs_time + scan_rs_time + obj_copy_time +
       _mark_closure_time_ms + termination_time);

    double young_other_time_ms = 0.0;
    if (_recorded_young_regions > 0) {
      young_other_time_ms =
        _recorded_young_cset_choice_time_ms +
        _recorded_young_free_cset_time_ms;
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                               (double) _recorded_young_regions);
    }
    double non_young_other_time_ms = 0.0;
    if (_recorded_non_young_regions > 0) {
      non_young_other_time_ms =
        _recorded_non_young_cset_choice_time_ms +
        _recorded_non_young_free_cset_time_ms;

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                                   (double) _recorded_non_young_regions);
    }
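    // These per-unit costs (per pending card, per scanned entry, per copied
    // byte, per young/non-young region) are the inputs to the pause-time
    // model: predict_base_elapsed_time_ms() and the related predict_*
    // helpers recombine them with predicted card counts and RS lengths when
    // sizing the next young list.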
    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_bytes_in_collection_set_before_gc > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                       (double) _bytes_in_collection_set_before_gc;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _scanned_cards_seq->add((double) cards_scanned);
    _rs_lengths_seq->add((double) _max_rs_lengths);

    double expensive_region_limit_ms =
      (double) MaxGCPauseMillis - predict_constant_other_time_ms();
    if (expensive_region_limit_ms < 0.0) {
      // this means that the other time was predicted to be longer than
      // the max pause time
      expensive_region_limit_ms = (double) MaxGCPauseMillis;
    }
    _expensive_region_limit_ms = expensive_region_limit_ms;

    if (PREDICTIONS_VERBOSE) {
      gclog_or_tty->print_cr("");
      gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
                             "REGIONS %d %d %d "
                             "PENDING_CARDS %d %d "
                             "CARDS_SCANNED %d %d "
                             "RS_LENGTHS %d %d "
                             "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
                             "SURVIVAL_RATIO %1.6lf %1.6lf "
                             "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
                             "OTHER_YOUNG %1.6lf %1.6lf "
                             "OTHER_NON_YOUNG %1.6lf %1.6lf "
                             "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
                             "ELAPSED %1.6lf %1.6lf ",
                             _cur_collection_start_sec,
                             (!_last_young_gc_full) ? 2 :
                             (last_pause_included_initial_mark) ? 1 : 0,
                             _recorded_region_num,
                             _recorded_young_regions,
                             _recorded_non_young_regions,
                             _predicted_pending_cards, _pending_cards,
                             _predicted_cards_scanned, cards_scanned,
                             _predicted_rs_lengths, _max_rs_lengths,
                             _predicted_rs_update_time_ms, update_rs_time,
                             _predicted_rs_scan_time_ms, scan_rs_time,
                             _predicted_survival_ratio, survival_ratio,
                             _predicted_object_copy_time_ms, obj_copy_time,
                             _predicted_constant_other_time_ms, constant_other_time_ms,
                             _predicted_young_other_time_ms, young_other_time_ms,
                             _predicted_non_young_other_time_ms, non_young_other_time_ms,
                             _vtime_diff_ms, termination_time,
                             _predicted_pause_time_ms, elapsed_ms);
    }

    if (G1PolicyVerbose > 0) {
      gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
                             _predicted_pause_time_ms,
                             (_within_target) ? "within" : "outside",
                             elapsed_ms);
    }
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->free_regions();
  calculate_young_list_min_length();
  calculate_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
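  // For example, with a 0.2s MMU budget and G1RSetUpdatingPauseTimePercent
  // of 10, the goal works out to 0.2 * 1000 * 10 / 100 = 20ms.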
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS *
                                  G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  // </NEW PREDICTION>
}

#define EXT_SIZE_FORMAT "%d%s"
#define EXT_SIZE_PARAMS(bytes) \
  byte_size_in_proper_unit((bytes)), \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::print_heap_transition() {
  if (PrintGCDetails) {
    YoungList* young_list = _g1->young_list();
    size_t eden_bytes = young_list->eden_used_bytes();
    size_t survivor_bytes = young_list->survivor_used_bytes();
    size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
    size_t used = _g1->used();
    size_t capacity = _g1->capacity();

    gclog_or_tty->print_cr(
         " [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
         "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
         "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
         EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
         EXT_SIZE_PARAMS(_eden_bytes_before_gc),
         EXT_SIZE_PARAMS(eden_bytes),
         EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
         EXT_SIZE_PARAMS(survivor_bytes),
         EXT_SIZE_PARAMS(used_before_gc),
         EXT_SIZE_PARAMS(_capacity_before_gc),
         EXT_SIZE_PARAMS(used),
         EXT_SIZE_PARAMS(capacity));
  } else if (PrintGC) {
    _g1->print_size_transition(gclog_or_tty,
                               _cur_collection_pause_used_at_start_bytes,
                               _g1->used(), _g1->capacity());
  }
}

// <NEW PREDICTION>

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k); // Can become 0; that's OK and would mean mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads' params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();
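
    // The mutator is asked to start processing completed buffers once the
    // queue grows a noise margin (sigma) above the green zone, and no later
    // than the yellow zone.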
    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

double
G1CollectorPolicy::predict_young_collection_elapsed_time_ms(size_t adjustment) {
  guarantee( adjustment == 0 || adjustment == 1, "invariant" );

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  size_t young_num = g1h->young_list()->length();
  if (young_num == 0)
    return 0.0;

  young_num += adjustment;
  size_t pending_cards = predict_pending_cards();
  size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
                      predict_rs_length_diff();
  size_t card_num;
  if (full_young_gcs())
    card_num = predict_young_card_num(rs_lengths);
  else
    card_num = predict_non_young_card_num(rs_lengths);
  size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  double accum_yg_surv_rate =
    _short_lived_surv_rate_group->accum_surv_rate(adjustment);

  size_t bytes_to_copy =
    (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);

  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy) +
    predict_young_other_time_ms(young_num) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (full_young_gcs())
    card_num = predict_young_card_num(rs_length);
  else
    card_num = predict_non_young_card_num(rs_length);
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool young) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;
  if (full_young_gcs())
    card_num = predict_young_card_num(rs_length);
  else
    card_num = predict_non_young_card_num(rs_length);
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  if (young)
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  else
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);

  return region_elapsed_time_ms;
}
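
// Estimate how many bytes the given region would evacuate: a marked region
// is bounded by its known maximum live data, while a young region scales its
// current usage by the predicted survival rate for its age.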
size_t
G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
  else {
    guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
               "invariant" );
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  }

  return bytes_to_copy;
}

void
G1CollectorPolicy::start_recording_regions() {
  _recorded_rs_lengths = 0;
  _recorded_young_regions = 0;
  _recorded_non_young_regions = 0;

#if PREDICTIONS_VERBOSE
  _recorded_marked_bytes = 0;
  _recorded_young_bytes = 0;
  _predicted_bytes_to_copy = 0;
  _predicted_rs_lengths = 0;
  _predicted_cards_scanned = 0;
#endif // PREDICTIONS_VERBOSE
}

void
G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
#if PREDICTIONS_VERBOSE
  if (!young) {
    _recorded_marked_bytes += hr->max_live_bytes();
  }
  _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
#endif // PREDICTIONS_VERBOSE

  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
}

void
G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  assert(!hr->is_young(), "should not call this");
  ++_recorded_non_young_regions;
  record_cset_region_info(hr, false);
}

void
G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  _recorded_young_regions = n_regions;
}

void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
#if PREDICTIONS_VERBOSE
  _recorded_young_bytes = bytes;
#endif // PREDICTIONS_VERBOSE
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  _predicted_bytes_to_copy = bytes;
}

void
G1CollectorPolicy::end_recording_regions() {
  // The _predicted_pause_time_ms field is referenced in code
  // not under PREDICTIONS_VERBOSE. Let's initialize it.
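  // (-1.0 is simply an obviously-invalid placeholder; the prediction code
  // below overwrites it when PREDICTIONS_VERBOSE is enabled.)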
  _predicted_pause_time_ms = -1.0;

#if PREDICTIONS_VERBOSE
  _predicted_pending_cards = predict_pending_cards();
  _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  if (full_young_gcs())
    _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  else
    _predicted_cards_scanned +=
      predict_non_young_card_num(_predicted_rs_lengths);
  _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;

  _predicted_rs_update_time_ms =
    predict_rs_update_time_ms(_g1->pending_card_num());
  _predicted_rs_scan_time_ms =
    predict_rs_scan_time_ms(_predicted_cards_scanned);
  _predicted_object_copy_time_ms =
    predict_object_copy_time_ms(_predicted_bytes_to_copy);
  _predicted_constant_other_time_ms =
    predict_constant_other_time_ms();
  _predicted_young_other_time_ms =
    predict_young_other_time_ms(_recorded_young_regions);
  _predicted_non_young_other_time_ms =
    predict_non_young_other_time_ms(_recorded_non_young_regions);

  _predicted_pause_time_ms =
    _predicted_rs_update_time_ms +
    _predicted_rs_scan_time_ms +
    _predicted_object_copy_time_ms +
    _predicted_constant_other_time_ms +
    _predicted_young_other_time_ms +
    _predicted_non_young_other_time_ms;
#endif // PREDICTIONS_VERBOSE
}
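
// Check whether a region whose evacuation is predicted to take this long
// should push the policy back towards fully-young collections.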
void G1CollectorPolicy::check_if_region_is_too_expensive(double predicted_time_ms) {
  // I don't think we need to do this when in young GC mode since
  // marking will be initiated next time we hit the soft limit anyway...
  if (predicted_time_ms > _expensive_region_limit_ms) {
    if (!in_young_gc_mode()) {
      set_full_young_gcs(true);
      // We might want to do something different here. However,
      // right now we don't support the non-generational G1 mode
      // (and in fact we are planning to remove the associated code,
      // see CR 6814390). So, let's leave it as is and this will be
      // removed some time in the future.
      ShouldNotReachHere();
      set_during_initial_mark_pause();
    } else
      // no point in doing another partial one
      _should_revert_to_full_young_gcs = true;
  }
}

// </NEW PREDICTION>

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
  if (_recent_pause_times_ms->num() == 0) {
    return (double) MaxGCPauseMillis;
  }
  return _recent_pause_times_ms->avg();
}

double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
  if (_recent_rs_scan_times_ms->num() == 0) {
    return (double)MaxGCPauseMillis/3.0;
  }
  return _recent_rs_scan_times_ms->avg();
}

int G1CollectorPolicy::number_of_recent_gcs() {
  assert(_recent_rs_scan_times_ms->num() ==
         _recent_pause_times_ms->num(), "Sequence out of sync");
  assert(_recent_pause_times_ms->num() ==
         _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  assert(_recent_CS_bytes_used_before->num() ==
         _recent_CS_bytes_surviving->num(), "Sequence out of sync");

  return _recent_pause_times_ms->num();
}

double G1CollectorPolicy::recent_avg_survival_fraction() {
  return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
                                           _recent_CS_bytes_used_before);
}

double G1CollectorPolicy::last_survival_fraction() {
  return last_survival_fraction_work(_recent_CS_bytes_surviving,
                                     _recent_CS_bytes_used_before);
}

double
G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                                     TruncatedSeq* before) {
  assert(surviving->num() == before->num(), "Sequence out of sync");
  if (before->sum() > 0.0) {
    double recent_survival_rate = surviving->sum() / before->sum();
    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections.
    // Further, we're now always doing parallel collection. But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
           _g1->evacuation_failed() ||
           recent_survival_rate <= 1.0, "Or bad frac");
    return recent_survival_rate;
  } else {
    return 1.0; // Be conservative.
  }
}

double
G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
                                               TruncatedSeq* before) {
  assert(surviving->num() == before->num(), "Sequence out of sync");
  if (surviving->num() > 0 && before->last() > 0.0) {
    double last_survival_rate = surviving->last() / before->last();
    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections.
    // Further, we're now always doing parallel collection. But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
           last_survival_rate <= 1.0, "Or bad frac");
    return last_survival_rate;
  } else {
    return 1.0;
  }
}

static const int survival_min_obs = 5;
static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
static const double min_survival_rate = 0.1;

double
G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
                                                           double latest) {
  double res = avg;
  if (number_of_recent_gcs() < survival_min_obs) {
    res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
  }
  res = MAX2(res, latest);
  res = MAX2(res, min_survival_rate);
  // In the parallel case, LAB fragmentation can produce "negative
  // collections"; so can evac failure. Cap at 1.0.
  res = MIN2(res, 1.0);
  return res;
}

size_t G1CollectorPolicy::expansion_amount() {
  if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
    if (G1PolicyVerbose > 1) {
      gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
                          "committed = %d%s, uncommitted = %d%s, via pct = %d%s.\n"
                          " Answer = %d.\n",
                          recent_avg_pause_time_ratio(),
                          byte_size_in_proper_unit(committed_bytes),
                          proper_unit_for_byte_size(committed_bytes),
                          byte_size_in_proper_unit(uncommitted_bytes),
                          proper_unit_for_byte_size(uncommitted_bytes),
                          byte_size_in_proper_unit(expand_bytes_via_pct),
                          proper_unit_for_byte_size(expand_bytes_via_pct),
                          byte_size_in_proper_unit(expand_bytes),
                          proper_unit_for_byte_size(expand_bytes));
    }
    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::note_start_of_mark_thread() {
  _mark_thread_startup_sec = os::elapsedTime();
}

class CountCSClosure: public HeapRegionClosure {
  G1CollectorPolicy* _g1_policy;
public:
  CountCSClosure(G1CollectorPolicy* g1_policy) :
    _g1_policy(g1_policy) {}
  bool doHeapRegion(HeapRegion* r) {
    _g1_policy->_bytes_in_collection_set_before_gc += r->used();
    return false;
  }
};

void G1CollectorPolicy::count_CS_bytes_used() {
  CountCSClosure cs_closure(this);
  _g1->collection_set_iterate(&cs_closure);
}

void G1CollectorPolicy::print_summary (int level,
                                       const char* str,
                                       NumberSeq* seq) const {
  double sum = seq->sum();
  LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
                                            str, sum / 1000.0, seq->avg());
}

void G1CollectorPolicy::print_summary_sd (int level,
                                          const char* str,
                                          NumberSeq* seq) const {
  print_summary(level, str, seq);
  LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                                            seq->num(), seq->sd(), seq->maximum());
}
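
// Cross-check a recorded "Other" time sequence against the one derived by
// subtracting the known phases: flag sums or averages that disagree by more
// than 10%, as well as any accumulated time that has gone negative.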
%8.2lf ms)", 2106 seq->num(), seq->sd(), seq->maximum()); 2107 } 2108 2109 void G1CollectorPolicy::check_other_times(int level, 2110 NumberSeq* other_times_ms, 2111 NumberSeq* calc_other_times_ms) const { 2112 bool should_print = false; 2113 LineBuffer buf(level + 2); 2114 2115 double max_sum = MAX2(fabs(other_times_ms->sum()), 2116 fabs(calc_other_times_ms->sum())); 2117 double min_sum = MIN2(fabs(other_times_ms->sum()), 2118 fabs(calc_other_times_ms->sum())); 2119 double sum_ratio = max_sum / min_sum; 2120 if (sum_ratio > 1.1) { 2121 should_print = true; 2122 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###"); 2123 } 2124 2125 double max_avg = MAX2(fabs(other_times_ms->avg()), 2126 fabs(calc_other_times_ms->avg())); 2127 double min_avg = MIN2(fabs(other_times_ms->avg()), 2128 fabs(calc_other_times_ms->avg())); 2129 double avg_ratio = max_avg / min_avg; 2130 if (avg_ratio > 1.1) { 2131 should_print = true; 2132 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###"); 2133 } 2134 2135 if (other_times_ms->sum() < -0.01) { 2136 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###"); 2137 } 2138 2139 if (other_times_ms->avg() < -0.01) { 2140 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###"); 2141 } 2142 2143 if (calc_other_times_ms->sum() < -0.01) { 2144 should_print = true; 2145 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###"); 2146 } 2147 2148 if (calc_other_times_ms->avg() < -0.01) { 2149 should_print = true; 2150 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###"); 2151 } 2152 2153 if (should_print) 2154 print_summary(level, "Other(Calc)", calc_other_times_ms); 2155 } 2156 2157 void G1CollectorPolicy::print_summary(PauseSummary* summary) const { 2158 bool parallel = G1CollectedHeap::use_parallel_gc_threads(); 2159 MainBodySummary* body_summary = summary->main_body_summary(); 2160 if (summary->get_total_seq()->num() > 0) { 2161 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq()); 2162 if (body_summary != NULL) { 2163 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq()); 2164 if (parallel) { 2165 print_summary(1, "Parallel Time", body_summary->get_parallel_seq()); 2166 print_summary(2, "Update RS", body_summary->get_update_rs_seq()); 2167 print_summary(2, "Ext Root Scanning", 2168 body_summary->get_ext_root_scan_seq()); 2169 print_summary(2, "Mark Stack Scanning", 2170 body_summary->get_mark_stack_scan_seq()); 2171 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq()); 2172 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq()); 2173 print_summary(2, "Termination", body_summary->get_termination_seq()); 2174 print_summary(2, "Other", body_summary->get_parallel_other_seq()); 2175 { 2176 NumberSeq* other_parts[] = { 2177 body_summary->get_update_rs_seq(), 2178 body_summary->get_ext_root_scan_seq(), 2179 body_summary->get_mark_stack_scan_seq(), 2180 body_summary->get_scan_rs_seq(), 2181 body_summary->get_obj_copy_seq(), 2182 body_summary->get_termination_seq() 2183 }; 2184 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(), 2185 6, other_parts); 2186 check_other_times(2, body_summary->get_parallel_other_seq(), 2187 &calc_other_times_ms); 2188 } 2189 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq()); 2190 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq()); 2191 } else { 2192 print_summary(1, "Update RS", body_summary->get_update_rs_seq()); 2193 print_summary(1, "Ext Root Scanning", 2194 
                      body_summary->get_ext_root_scan_seq());
        print_summary(1, "Mark Stack Scanning",
                      body_summary->get_mark_stack_scan_seq());
        print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
        print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
      }
    }
    print_summary(1, "Other", summary->get_other_seq());
    {
      if (body_summary != NULL) {
        NumberSeq calc_other_times_ms;
        if (parallel) {
          // parallel
          NumberSeq* other_parts[] = {
            body_summary->get_satb_drain_seq(),
            body_summary->get_parallel_seq(),
            body_summary->get_clear_ct_seq()
          };
          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
                                          3, other_parts);
        } else {
          // serial
          NumberSeq* other_parts[] = {
            body_summary->get_satb_drain_seq(),
            body_summary->get_update_rs_seq(),
            body_summary->get_ext_root_scan_seq(),
            body_summary->get_mark_stack_scan_seq(),
            body_summary->get_scan_rs_seq(),
            body_summary->get_obj_copy_seq()
          };
          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
                                          6, other_parts);
        }
        check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
      }
    }
  } else {
    LineBuffer(1).append_and_print_cr("none");
  }
  LineBuffer(0).append_and_print_cr("");
}

void G1CollectorPolicy::print_tracing_info() const {
  if (TraceGen0Time) {
    gclog_or_tty->print_cr("ALL PAUSES");
    print_summary_sd(0, "Total", _all_pause_times_ms);
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
    gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
    gclog_or_tty->print_cr("");

    gclog_or_tty->print_cr("EVACUATION PAUSES");
    print_summary(_summary);

    gclog_or_tty->print_cr("MISC");
    print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
    print_summary_sd(0, "Yields", _all_yield_times_ms);
    for (int i = 0; i < _aux_num; ++i) {
      if (_all_aux_times_ms[i].num() > 0) {
        char buffer[96];
        sprintf(buffer, "Aux%d", i);
        print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
      }
    }

    size_t all_region_num = _region_num_young + _region_num_tenured;
    gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
                           "Tenured %8d (%6.2lf%%)",
                           all_region_num,
                           _region_num_young,
                           (double) _region_num_young / (double) all_region_num * 100.0,
                           _region_num_tenured,
                           (double) _region_num_tenured / (double) all_region_num * 100.0);
  }
  if (TraceGen1Time) {
    if (_all_full_gc_times_ms->num() > 0) {
      gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                          _all_full_gc_times_ms->num(),
                          _all_full_gc_times_ms->sum() / 1000.0);
      gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
      gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
                             _all_full_gc_times_ms->sd(),
                             _all_full_gc_times_ms->maximum());
    }
  }
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

void
G1CollectorPolicy::update_region_num(bool young) {
  if (young) {
    ++_region_num_young;
  } else {
    ++_region_num_tenured;
  }
}

#ifndef PRODUCT
// for debugging, bit of a hack...
static char*
region_num_to_mbs(int length) {
  static char buffer[64];
  double bytes = (double) (length * HeapRegion::GrainBytes);
  double mbs = bytes / (double) (1024 * 1024);
  sprintf(buffer, "%7.2lfMB", mbs);
  return buffer;
}
#endif // PRODUCT

size_t G1CollectorPolicy::max_regions(int purpose) {
  switch (purpose) {
    case GCAllocForSurvived:
      return _max_survivor_regions;
    case GCAllocForTenured:
      return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED;
  };
}

void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
  size_t expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (size_t) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
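// Both the maximum number of survivor regions and the tenuring threshold
// are recomputed; when G1FixedTenuringThreshold is set, MaxTenuringThreshold
// is used directly instead of the age-table computation.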
void G1CollectorPolicy::calculate_survivors_policy()
{
  if (G1FixedSurvivorSpaceSize == 0) {
    _max_survivor_regions = _young_list_target_length / SurvivorRatio;
  } else {
    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
  }

  if (G1FixedTenuringThreshold) {
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions);
  }
}

#ifndef PRODUCT
class HRSortIndexIsOKClosure: public HeapRegionClosure {
  CollectionSetChooser* _chooser;
public:
  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
    _chooser(chooser) {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
    }
    return false;
  }
};

bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
  HRSortIndexIsOKClosure cl(_collectionSetChooser);
  _g1->heap_region_iterate(&cl);
  return true;
}
#endif

bool
G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    return false;
  }
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We noticed on a previous pause that the heap occupancy had
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // would overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
    }
  }
}

void
G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_start(double start_time_sec, size_t start_used) {
  G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
}
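
// Rebuilds the CollectionSetChooser's candidate list after cleanup: every
// marked region that is neither humongous nor young is added (humongous
// regions are collected at the end of the marking cycle, and young regions
// must be in the next collection pause anyway).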
class KnownGarbageClosure: public HeapRegionClosure {
  CollectionSetChooser* _hrSorted;

public:
  KnownGarbageClosure(CollectionSetChooser* hrSorted) :
    _hrSorted(hrSorted)
  {}

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We don't include humongous regions in collection
      // sets because we collect them immediately at the end of a marking
      // cycle. We also don't include young regions because we *must*
      // include them in the next collection pause.
      if (!r->isHumongous() && !r->is_young()) {
        _hrSorted->addMarkedHeapRegion(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageHRClosure: public HeapRegionClosure {
  CollectionSetChooser* _hrSorted;
  jint _marked_regions_added;
  jint _chunk_size;
  jint _cur_chunk_idx;
  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  int _worker;
  int _invokes;

  void get_new_chunk() {
    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
    _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  }
  void add_region(HeapRegion* r) {
    if (_cur_chunk_idx == _cur_chunk_end) {
      get_new_chunk();
    }
    assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
    _marked_regions_added++;
    _cur_chunk_idx++;
  }

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           jint chunk_size,
                           int worker) :
    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
    _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
    _invokes(0)
  {}

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.
    _invokes++;

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We don't include humongous regions in collection
      // sets because we collect them immediately at the end of a marking
      // cycle.
      // We also do not include young regions in collection sets
      if (!r->isHumongous() && !r->is_young()) {
        add_region(r);
      }
    }
    return false;
  }
  jint marked_regions_added() { return _marked_regions_added; }
  int invokes() { return _invokes; }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  jint _chunk_size;
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap())
  {}

  void work(int i) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
                                         HeapRegion::InitialClaimValue);
    jint regions_added = parKnownGarbageCl.marked_regions_added();
    _hrSorted->incNumMarkedHeapRegions(regions_added);
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
                             i, parKnownGarbageCl.invokes(), regions_added);
    }
  }
};

void
G1CollectorPolicy_BestRegionsFirst::
record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                   size_t max_live_bytes) {
  double start;
  if (G1PrintParCleanupStats) start = os::elapsedTime();
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end;
  if (G1PrintParCleanupStats) {
    clear_marked_end = os::elapsedTime();
    gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
                           (clear_marked_end - start)*1000.0);
  }
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const size_t OverpartitionFactor = 4;
    const size_t MinWorkUnit = 8;
    const size_t WorkUnit =
      MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
           MinWorkUnit);
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }
  double known_garbage_end;
  if (G1PrintParCleanupStats) {
    known_garbage_end = os::elapsedTime();
    gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
                           (known_garbage_end - clear_marked_end)*1000.0);
  }
  _collectionSetChooser->sortMarkedHeapRegions();
  double sort_end;
  if (G1PrintParCleanupStats) {
    sort_end = os::elapsedTime();
    gclog_or_tty->print_cr(" sorting: %8.3f ms.",
                           (sort_end - known_garbage_end)*1000.0);
  }

  record_concurrent_mark_cleanup_end_work2();
  double work2_end;
  if (G1PrintParCleanupStats) {
    work2_end = os::elapsedTime();
    gclog_or_tty->print_cr(" work2: %8.3f ms.",
                           (work2_end - sort_end)*1000.0);
  }
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::
add_to_collection_set(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(!hr->is_young(), "non-incremental add of young region");

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->registerCSetRegion(hr);

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_size++;
  _collection_set_bytes_used_before += hr->used();
  _g1->register_region_with_in_cset_fast_test(hr);
}

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_size = 0;
  _inc_cset_bytes_used_before = 0;

  if (in_young_gc_mode()) {
    _inc_cset_young_index = 0;
  }

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_young_bytes = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_predicted_elapsed_time_ms = 0;
  _inc_cset_predicted_bytes_to_copy = 0;
  _inc_cset_build_state = Active;
}

void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  size_t used_bytes = hr->used();

  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;

  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

#if PREDICTIONS_VERBOSE
  size_t bytes_to_copy = predict_bytes_to_copy(hr);
  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;

  // Record the number of bytes used in this region
  _inc_cset_recorded_young_bytes += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_predicted_bytes_to_copy(bytes_to_copy);
#endif // PREDICTIONS_VERBOSE
}

void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  // This routine is currently only called as part of the updating of
  // existing policy information for regions in the incremental cset that
  // is performed by the concurrent refine thread(s) as part of young list
  // RSet sampling. Therefore we should not be at a safepoint.

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(hr->is_young(), "it should be");

  size_t used_bytes = hr->used();
  size_t old_rs_length = hr->recorded_rs_length();
  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();

  // Subtract the old recorded/predicted policy information for
  // the given heap region from the collection set info.
  _inc_cset_recorded_rs_lengths -= old_rs_length;
  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;

  _inc_cset_bytes_used_before -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_recorded_rs_length(0);
  hr->set_predicted_elapsed_time_ms(0);

#if PREDICTIONS_VERBOSE
  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;

  // Subtract the number of bytes used in this region
  _inc_cset_recorded_young_bytes -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_predicted_bytes_to_copy(0);
#endif // PREDICTIONS_VERBOSE
}

void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  // Update the collection set information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");

  remove_from_incremental_cset_info(hr);
  add_to_incremental_cset_info(hr, new_rs_length);
}

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert( hr->is_young(), "invariant");
  assert( hr->young_index_in_cset() == -1, "invariant" );
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert( hr->next_in_collection_set() == NULL, "invariant");

  _inc_cset_size++;
  _g1->register_region_with_in_cset_fast_test(hr);

  hr->set_young_index_in_cset((int) _inc_cset_young_index);
  ++_inc_cset_young_index;
}

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert( hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(!hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}

#ifndef PRODUCT
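// Debugging helper: dump each region on the given collection set list along
// with its boundaries, marking state, and age/survivor information.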
void
G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                 "age: %4d, y: %d, surv: %d",
                 csr->bottom(), csr->end(),
                 csr->top(),
                 csr->prev_top_at_mark_start(),
                 csr->next_top_at_mark_start(),
                 csr->top_at_conc_mark_count(),
                 csr->age_in_surv_rate_group_cond(),
                 csr->is_young(),
                 csr->is_survivor());
    csr = next;
  }
}
#endif // !PRODUCT

void
G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
                                                  double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  start_recording_regions();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  // the 10% and 50% values are arbitrary...
  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
    time_remaining_ms = 0.50 * target_pause_time_ms;
    _within_target = false;
  } else {
    _within_target = true;
  }

  // We figure out the number of bytes available for future to-space.
  // For new regions without marking information, we must assume the
  // worst-case of complete survival. If we have marking information for a
  // region, we can bound the amount of live data. We can add a number of
  // such regions, as long as the sum of the live data bounds does not
  // exceed the available evacuation space.
  size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;

  size_t expansion_bytes =
    _g1->expansion_regions() * HeapRegion::GrainBytes;

  _collection_set_bytes_used_before = 0;
  _collection_set_size = 0;

  // Adjust for expansion and slop.
  max_live_bytes = max_live_bytes + expansion_bytes;

  HeapRegion* hr;
  if (in_young_gc_mode()) {
    double young_start_time_sec = os::elapsedTime();

    if (G1PolicyVerbose > 0) {
      gclog_or_tty->print_cr("Adding %d young regions to the CSet",
                             _g1->young_list()->length());
    }

    _young_cset_length = 0;
    _last_young_gc_full = full_young_gcs();

    if (_last_young_gc_full)
      ++_full_young_pause_num;
    else
      ++_partial_young_pause_num;

    // The young list is laid out with the survivor regions from the
    // previous pause appended to its RHS, i.e.
    // [Newly Young Regions ++ Survivors from last pause].

    hr = _g1->young_list()->first_survivor_region();
    while (hr != NULL) {
      assert(hr->is_survivor(), "badly formed young list");
      hr->set_young();
      hr = hr->get_next_young_region();
    }

    // Clear the fields that point to the survivor list - they are
    // all young now.
    _g1->young_list()->clear_survivors();

    if (_g1->mark_in_progress())
      _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);

    _young_cset_length = _inc_cset_young_index;
    _collection_set = _inc_cset_head;
    _collection_set_size = _inc_cset_size;
    _collection_set_bytes_used_before = _inc_cset_bytes_used_before;

    // For young regions in the collection set, we assume the worst
    // case of complete survival
    max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;

    time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
    predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

    // The number of recorded young regions is the incremental
    // collection set's current size
    set_recorded_young_regions(_inc_cset_size);
    set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
    set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
#if PREDICTIONS_VERBOSE
    set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
#endif // PREDICTIONS_VERBOSE

    if (G1PolicyVerbose > 0) {
      gclog_or_tty->print_cr(" Added " PTR_FORMAT " Young Regions to CS.",
                             _inc_cset_size);
      gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
                             max_live_bytes/K);
    }

    assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");

    double young_end_time_sec = os::elapsedTime();
    _recorded_young_cset_choice_time_ms =
      (young_end_time_sec - young_start_time_sec) * 1000.0;

    // We are doing young collections so reset this.
    non_young_start_time_sec = young_end_time_sec;

    // Note we can use either _collection_set_size or
    // _young_cset_length here
    if (_collection_set_size > 0 && _last_young_gc_full) {
      // don't bother adding more regions...
      goto choose_collection_set_end;
    }
  }

  if (!in_young_gc_mode() || !full_young_gcs()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large

    do {
      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_to_collection_set(hr);
        record_non_young_cset_region(hr);
        max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
        if (G1PolicyVerbose > 0) {
          gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
                                 max_live_bytes/K);
        }
        seq.add(predicted_time_ms);
        avg_prediction = seq.avg() + seq.sd();
      }
      should_continue =
        ( hr != NULL) &&
        ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
                                         : _collection_set_size < _young_list_fixed_length );
    } while (should_continue);
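
    // If the chooser ran out of marked regions before the fixed-length
    // collection set was filled, there is little point in further
    // partially-young pauses; revert to fully-young GCs.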
    if (!adaptive_young_list_length() &&
        _collection_set_size < _young_list_fixed_length)
      _should_revert_to_full_young_gcs = true;
  }

choose_collection_set_end:
  stop_incremental_cset_building();

  count_CS_bytes_used();

  end_recording_regions();

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}

void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
  G1CollectorPolicy::record_full_collection_end();
  _collectionSetChooser->updateAfterFullCollection();
}

void G1CollectorPolicy_BestRegionsFirst::
expand_if_possible(size_t numRegions) {
  size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
  _g1->expand(expansion_bytes);
}

void G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_end() {
  G1CollectorPolicy::record_collection_pause_end();
  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}