/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
//   * choice of collection set.
//   * when to collect.
36 37 class HeapRegion; 38 class CollectionSetChooser; 39 40 class TraceGen0TimeData : public CHeapObj { 41 private: 42 unsigned _young_pause_num; 43 unsigned _mixed_pause_num; 44 45 NumberSeq _all_stop_world_times_ms; 46 NumberSeq _all_yield_times_ms; 47 48 NumberSeq _total; 49 NumberSeq _other; 50 NumberSeq _root_region_scan_wait; 51 NumberSeq _parallel; 52 NumberSeq _ext_root_scan; 53 NumberSeq _satb_filtering; 54 NumberSeq _update_rs; 55 NumberSeq _scan_rs; 56 NumberSeq _obj_copy; 57 NumberSeq _termination; 58 NumberSeq _parallel_other; 59 NumberSeq _clear_ct; 60 61 void print_summary (int level, const char* str, const NumberSeq* seq) const; 62 void print_summary_sd (int level, const char* str, const NumberSeq* seq) const; 63 64 public: 65 TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {}; 66 void record_start_collection(double time_to_stop_the_world_ms); 67 void record_yield_time(double yield_time_ms); 68 void record_end_collection( 69 double total_ms, 70 double other_ms, 71 double root_region_scan_wait_ms, 72 double parallel_ms, 73 double ext_root_scan_ms, 74 double satb_filtering_ms, 75 double update_rs_ms, 76 double scan_rs_ms, 77 double obj_copy_ms, 78 double termination_ms, 79 double parallel_other_ms, 80 double clear_ct_ms); 81 void increment_young_collection_count(); 82 void increment_mixed_collection_count(); 83 void print() const; 84 }; 85 86 class TraceGen1TimeData : public CHeapObj { 87 private: 88 NumberSeq _all_full_gc_times; 89 90 public: 91 void record_full_collection(double full_gc_time_ms); 92 void print() const; 93 }; 94 95 // There are three command line options related to the young gen size: 96 // NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is 97 // just a short form for NewSize==MaxNewSize). G1 will use its internal 98 // heuristics to calculate the actual young gen size, so these options 99 // basically only limit the range within which G1 can pick a young gen 100 // size. 
// Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between
// G1DefaultMinNewGenPercent and G1DefaultMaxNewGenPercent of the
// heap size. This means that every time the heap size changes the
// limits for the young gen size will be updated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1DefaultMaxNewGenPercent
// of the heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1DefaultMinNewGenPercent
// of the heap as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
131 class G1YoungGenSizer : public CHeapObj { 132 private: 133 enum SizerKind { 134 SizerDefaults, 135 SizerNewSizeOnly, 136 SizerMaxNewSizeOnly, 137 SizerMaxAndNewSize, 138 SizerNewRatio 139 }; 140 SizerKind _sizer_kind; 141 uint _min_desired_young_length; 142 uint _max_desired_young_length; 143 bool _adaptive_size; 144 uint calculate_default_min_length(uint new_number_of_heap_regions); 145 uint calculate_default_max_length(uint new_number_of_heap_regions); 146 147 public: 148 G1YoungGenSizer(); 149 void heap_size_changed(uint new_number_of_heap_regions); 150 uint min_desired_young_length() { 151 return _min_desired_young_length; 152 } 153 uint max_desired_young_length() { 154 return _max_desired_young_length; 155 } 156 bool adaptive_young_list_length() { 157 return _adaptive_size; 158 } 159 }; 160 161 class G1CollectorPolicy: public CollectorPolicy { 162 private: 163 // either equal to the number of parallel threads, if ParallelGCThreads 164 // has been set, or 1 otherwise 165 int _parallel_gc_threads; 166 167 // The number of GC threads currently active. 
168 uintx _no_of_gc_threads; 169 170 enum SomePrivateConstants { 171 NumPrevPausesForHeuristics = 10 172 }; 173 174 G1MMUTracker* _mmu_tracker; 175 176 void initialize_flags(); 177 178 void initialize_all() { 179 initialize_flags(); 180 initialize_size_info(); 181 initialize_perm_generation(PermGen::MarkSweepCompact); 182 } 183 184 CollectionSetChooser* _collectionSetChooser; 185 186 double _cur_collection_start_sec; 187 size_t _cur_collection_pause_used_at_start_bytes; 188 uint _cur_collection_pause_used_regions_at_start; 189 double _cur_collection_par_time_ms; 190 191 double _cur_collection_code_root_fixup_time_ms; 192 193 double _cur_clear_ct_time_ms; 194 double _cur_ref_proc_time_ms; 195 double _cur_ref_enq_time_ms; 196 197 #ifndef PRODUCT 198 // Card Table Count Cache stats 199 double _min_clear_cc_time_ms; // min 200 double _max_clear_cc_time_ms; // max 201 double _cur_clear_cc_time_ms; // clearing time during current pause 202 double _cum_clear_cc_time_ms; // cummulative clearing time 203 jlong _num_cc_clears; // number of times the card count cache has been cleared 204 #endif 205 206 // These exclude marking times. 207 TruncatedSeq* _recent_gc_times_ms; 208 209 TruncatedSeq* _concurrent_mark_remark_times_ms; 210 TruncatedSeq* _concurrent_mark_cleanup_times_ms; 211 212 TraceGen0TimeData _trace_gen0_time_data; 213 TraceGen1TimeData _trace_gen1_time_data; 214 215 double _stop_world_start; 216 217 double* _par_last_gc_worker_start_times_ms; 218 double* _par_last_ext_root_scan_times_ms; 219 double* _par_last_satb_filtering_times_ms; 220 double* _par_last_update_rs_times_ms; 221 double* _par_last_update_rs_processed_buffers; 222 double* _par_last_scan_rs_times_ms; 223 double* _par_last_obj_copy_times_ms; 224 double* _par_last_termination_times_ms; 225 double* _par_last_termination_attempts; 226 double* _par_last_gc_worker_end_times_ms; 227 double* _par_last_gc_worker_times_ms; 228 229 // Each workers 'other' time i.e. 
the elapsed time of the parallel 230 // code executed by a worker minus the sum of the individual sub-phase 231 // times for that worker thread. 232 double* _par_last_gc_worker_other_times_ms; 233 234 // indicates whether we are in young or mixed GC mode 235 bool _gcs_are_young; 236 237 uint _young_list_target_length; 238 uint _young_list_fixed_length; 239 size_t _prev_eden_capacity; // used for logging 240 241 // The max number of regions we can extend the eden by while the GC 242 // locker is active. This should be >= _young_list_target_length; 243 uint _young_list_max_length; 244 245 bool _last_gc_was_young; 246 247 bool _during_marking; 248 bool _in_marking_window; 249 bool _in_marking_window_im; 250 251 SurvRateGroup* _short_lived_surv_rate_group; 252 SurvRateGroup* _survivor_surv_rate_group; 253 // add here any more surv rate groups 254 255 double _gc_overhead_perc; 256 257 double _reserve_factor; 258 uint _reserve_regions; 259 260 bool during_marking() { 261 return _during_marking; 262 } 263 264 private: 265 enum PredictionConstants { 266 TruncatedSeqLength = 10 267 }; 268 269 TruncatedSeq* _alloc_rate_ms_seq; 270 double _prev_collection_pause_end_ms; 271 272 TruncatedSeq* _pending_card_diff_seq; 273 TruncatedSeq* _rs_length_diff_seq; 274 TruncatedSeq* _cost_per_card_ms_seq; 275 TruncatedSeq* _young_cards_per_entry_ratio_seq; 276 TruncatedSeq* _mixed_cards_per_entry_ratio_seq; 277 TruncatedSeq* _cost_per_entry_ms_seq; 278 TruncatedSeq* _mixed_cost_per_entry_ms_seq; 279 TruncatedSeq* _cost_per_byte_ms_seq; 280 TruncatedSeq* _constant_other_time_ms_seq; 281 TruncatedSeq* _young_other_cost_per_region_ms_seq; 282 TruncatedSeq* _non_young_other_cost_per_region_ms_seq; 283 284 TruncatedSeq* _pending_cards_seq; 285 TruncatedSeq* _rs_lengths_seq; 286 287 TruncatedSeq* _cost_per_byte_ms_during_cm_seq; 288 289 G1YoungGenSizer* _young_gen_sizer; 290 291 uint _eden_cset_region_length; 292 uint _survivor_cset_region_length; 293 uint _old_cset_region_length; 294 295 void 
init_cset_region_lengths(uint eden_cset_region_length, 296 uint survivor_cset_region_length); 297 298 uint eden_cset_region_length() { return _eden_cset_region_length; } 299 uint survivor_cset_region_length() { return _survivor_cset_region_length; } 300 uint old_cset_region_length() { return _old_cset_region_length; } 301 302 uint _free_regions_at_end_of_collection; 303 304 size_t _recorded_rs_lengths; 305 size_t _max_rs_lengths; 306 307 double _recorded_young_free_cset_time_ms; 308 double _recorded_non_young_free_cset_time_ms; 309 310 double _sigma; 311 312 size_t _rs_lengths_prediction; 313 314 double sigma() { return _sigma; } 315 316 // A function that prevents us putting too much stock in small sample 317 // sets. Returns a number between 2.0 and 1.0, depending on the number 318 // of samples. 5 or more samples yields one; fewer scales linearly from 319 // 2.0 at 1 sample to 1.0 at 5. 320 double confidence_factor(int samples) { 321 if (samples > 4) return 1.0; 322 else return 1.0 + sigma() * ((double)(5 - samples))/2.0; 323 } 324 325 double get_new_neg_prediction(TruncatedSeq* seq) { 326 return seq->davg() - sigma() * seq->dsd(); 327 } 328 329 #ifndef PRODUCT 330 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group); 331 #endif // PRODUCT 332 333 void adjust_concurrent_refinement(double update_rs_time, 334 double update_rs_processed_buffers, 335 double goal_ms); 336 337 uintx no_of_gc_threads() { return _no_of_gc_threads; } 338 void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; } 339 340 double _pause_time_target_ms; 341 double _recorded_young_cset_choice_time_ms; 342 double _recorded_non_young_cset_choice_time_ms; 343 size_t _pending_cards; 344 size_t _max_pending_cards; 345 346 public: 347 // Accessors 348 349 void set_region_eden(HeapRegion* hr, int young_index_in_cset) { 350 hr->set_young(); 351 hr->install_surv_rate_group(_short_lived_surv_rate_group); 352 hr->set_young_index_in_cset(young_index_in_cset); 353 } 354 355 void 
set_region_survivor(HeapRegion* hr, int young_index_in_cset) { 356 assert(hr->is_young() && hr->is_survivor(), "pre-condition"); 357 hr->install_surv_rate_group(_survivor_surv_rate_group); 358 hr->set_young_index_in_cset(young_index_in_cset); 359 } 360 361 #ifndef PRODUCT 362 bool verify_young_ages(); 363 #endif // PRODUCT 364 365 double get_new_prediction(TruncatedSeq* seq) { 366 return MAX2(seq->davg() + sigma() * seq->dsd(), 367 seq->davg() * confidence_factor(seq->num())); 368 } 369 370 void record_max_rs_lengths(size_t rs_lengths) { 371 _max_rs_lengths = rs_lengths; 372 } 373 374 size_t predict_pending_card_diff() { 375 double prediction = get_new_neg_prediction(_pending_card_diff_seq); 376 if (prediction < 0.00001) { 377 return 0; 378 } else { 379 return (size_t) prediction; 380 } 381 } 382 383 size_t predict_pending_cards() { 384 size_t max_pending_card_num = _g1->max_pending_card_num(); 385 size_t diff = predict_pending_card_diff(); 386 size_t prediction; 387 if (diff > max_pending_card_num) { 388 prediction = max_pending_card_num; 389 } else { 390 prediction = max_pending_card_num - diff; 391 } 392 393 return prediction; 394 } 395 396 size_t predict_rs_length_diff() { 397 return (size_t) get_new_prediction(_rs_length_diff_seq); 398 } 399 400 double predict_alloc_rate_ms() { 401 return get_new_prediction(_alloc_rate_ms_seq); 402 } 403 404 double predict_cost_per_card_ms() { 405 return get_new_prediction(_cost_per_card_ms_seq); 406 } 407 408 double predict_rs_update_time_ms(size_t pending_cards) { 409 return (double) pending_cards * predict_cost_per_card_ms(); 410 } 411 412 double predict_young_cards_per_entry_ratio() { 413 return get_new_prediction(_young_cards_per_entry_ratio_seq); 414 } 415 416 double predict_mixed_cards_per_entry_ratio() { 417 if (_mixed_cards_per_entry_ratio_seq->num() < 2) { 418 return predict_young_cards_per_entry_ratio(); 419 } else { 420 return get_new_prediction(_mixed_cards_per_entry_ratio_seq); 421 } 422 } 423 424 size_t 
predict_young_card_num(size_t rs_length) { 425 return (size_t) ((double) rs_length * 426 predict_young_cards_per_entry_ratio()); 427 } 428 429 size_t predict_non_young_card_num(size_t rs_length) { 430 return (size_t) ((double) rs_length * 431 predict_mixed_cards_per_entry_ratio()); 432 } 433 434 double predict_rs_scan_time_ms(size_t card_num) { 435 if (gcs_are_young()) { 436 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq); 437 } else { 438 return predict_mixed_rs_scan_time_ms(card_num); 439 } 440 } 441 442 double predict_mixed_rs_scan_time_ms(size_t card_num) { 443 if (_mixed_cost_per_entry_ms_seq->num() < 3) { 444 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq); 445 } else { 446 return (double) (card_num * 447 get_new_prediction(_mixed_cost_per_entry_ms_seq)); 448 } 449 } 450 451 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) { 452 if (_cost_per_byte_ms_during_cm_seq->num() < 3) { 453 return (1.1 * (double) bytes_to_copy) * 454 get_new_prediction(_cost_per_byte_ms_seq); 455 } else { 456 return (double) bytes_to_copy * 457 get_new_prediction(_cost_per_byte_ms_during_cm_seq); 458 } 459 } 460 461 double predict_object_copy_time_ms(size_t bytes_to_copy) { 462 if (_in_marking_window && !_in_marking_window_im) { 463 return predict_object_copy_time_ms_during_cm(bytes_to_copy); 464 } else { 465 return (double) bytes_to_copy * 466 get_new_prediction(_cost_per_byte_ms_seq); 467 } 468 } 469 470 double predict_constant_other_time_ms() { 471 return get_new_prediction(_constant_other_time_ms_seq); 472 } 473 474 double predict_young_other_time_ms(size_t young_num) { 475 return (double) young_num * 476 get_new_prediction(_young_other_cost_per_region_ms_seq); 477 } 478 479 double predict_non_young_other_time_ms(size_t non_young_num) { 480 return (double) non_young_num * 481 get_new_prediction(_non_young_other_cost_per_region_ms_seq); 482 } 483 484 double predict_base_elapsed_time_ms(size_t pending_cards); 485 double 
predict_base_elapsed_time_ms(size_t pending_cards, 486 size_t scanned_cards); 487 size_t predict_bytes_to_copy(HeapRegion* hr); 488 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); 489 490 void set_recorded_rs_lengths(size_t rs_lengths); 491 492 uint cset_region_length() { return young_cset_region_length() + 493 old_cset_region_length(); } 494 uint young_cset_region_length() { return eden_cset_region_length() + 495 survivor_cset_region_length(); } 496 497 void record_young_free_cset_time_ms(double time_ms) { 498 _recorded_young_free_cset_time_ms = time_ms; 499 } 500 501 void record_non_young_free_cset_time_ms(double time_ms) { 502 _recorded_non_young_free_cset_time_ms = time_ms; 503 } 504 505 double predict_survivor_regions_evac_time(); 506 507 void cset_regions_freed() { 508 bool propagate = _last_gc_was_young && !_in_marking_window; 509 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate); 510 _survivor_surv_rate_group->all_surviving_words_recorded(propagate); 511 // also call it on any more surv rate groups 512 } 513 514 G1MMUTracker* mmu_tracker() { 515 return _mmu_tracker; 516 } 517 518 double max_pause_time_ms() { 519 return _mmu_tracker->max_gc_time() * 1000.0; 520 } 521 522 double predict_remark_time_ms() { 523 return get_new_prediction(_concurrent_mark_remark_times_ms); 524 } 525 526 double predict_cleanup_time_ms() { 527 return get_new_prediction(_concurrent_mark_cleanup_times_ms); 528 } 529 530 // Returns an estimate of the survival rate of the region at yg-age 531 // "yg_age". 532 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) { 533 TruncatedSeq* seq = surv_rate_group->get_seq(age); 534 if (seq->num() == 0) 535 gclog_or_tty->print("BARF! 
age is %d", age); 536 guarantee( seq->num() > 0, "invariant" ); 537 double pred = get_new_prediction(seq); 538 if (pred > 1.0) 539 pred = 1.0; 540 return pred; 541 } 542 543 double predict_yg_surv_rate(int age) { 544 return predict_yg_surv_rate(age, _short_lived_surv_rate_group); 545 } 546 547 double accum_yg_surv_rate_pred(int age) { 548 return _short_lived_surv_rate_group->accum_surv_rate_pred(age); 549 } 550 551 private: 552 void print_stats(int level, const char* str, double value); 553 void print_stats(int level, const char* str, double value, int workers); 554 void print_stats(int level, const char* str, int value); 555 556 void print_par_stats(int level, const char* str, double* data, bool showDecimals = true); 557 558 double avg_value (double* data); 559 double max_value (double* data); 560 double sum_of_values (double* data); 561 double max_sum (double* data1, double* data2); 562 563 double _last_pause_time_ms; 564 565 size_t _bytes_in_collection_set_before_gc; 566 size_t _bytes_copied_during_gc; 567 568 // Used to count used bytes in CS. 569 friend class CountCSClosure; 570 571 // Statistics kept per GC stoppage, pause or full. 572 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; 573 574 // Add a new GC of the given duration and end time to the record. 575 void update_recent_gc_times(double end_time_sec, double elapsed_ms); 576 577 // The head of the list (via "next_in_collection_set()") representing the 578 // current collection set. Set from the incrementally built collection 579 // set at the start of the pause. 580 HeapRegion* _collection_set; 581 582 // The number of bytes in the collection set before the pause. Set from 583 // the incrementally built collection set at the start of an evacuation 584 // pause. 585 size_t _collection_set_bytes_used_before; 586 587 // The associated information that is maintained while the incremental 588 // collection set is being built with young regions. 
Used to populate 589 // the recorded info for the evacuation pause. 590 591 enum CSetBuildType { 592 Active, // We are actively building the collection set 593 Inactive // We are not actively building the collection set 594 }; 595 596 CSetBuildType _inc_cset_build_state; 597 598 // The head of the incrementally built collection set. 599 HeapRegion* _inc_cset_head; 600 601 // The tail of the incrementally built collection set. 602 HeapRegion* _inc_cset_tail; 603 604 // The number of bytes in the incrementally built collection set. 605 // Used to set _collection_set_bytes_used_before at the start of 606 // an evacuation pause. 607 size_t _inc_cset_bytes_used_before; 608 609 // Used to record the highest end of heap region in collection set 610 HeapWord* _inc_cset_max_finger; 611 612 // The RSet lengths recorded for regions in the CSet. It is updated 613 // by the thread that adds a new region to the CSet. We assume that 614 // only one thread can be allocating a new CSet region (currently, 615 // it does so after taking the Heap_lock) hence no need to 616 // synchronize updates to this field. 617 size_t _inc_cset_recorded_rs_lengths; 618 619 // A concurrent refinement thread periodcially samples the young 620 // region RSets and needs to update _inc_cset_recorded_rs_lengths as 621 // the RSets grow. Instead of having to syncronize updates to that 622 // field we accumulate them in this field and add it to 623 // _inc_cset_recorded_rs_lengths_diffs at the start of a GC. 624 ssize_t _inc_cset_recorded_rs_lengths_diffs; 625 626 // The predicted elapsed time it will take to collect the regions in 627 // the CSet. This is updated by the thread that adds a new region to 628 // the CSet. See the comment for _inc_cset_recorded_rs_lengths about 629 // MT-safety assumptions. 630 double _inc_cset_predicted_elapsed_time_ms; 631 632 // See the comment for _inc_cset_recorded_rs_lengths_diffs. 
633 double _inc_cset_predicted_elapsed_time_ms_diffs; 634 635 // Stash a pointer to the g1 heap. 636 G1CollectedHeap* _g1; 637 638 // The ratio of gc time to elapsed time, computed over recent pauses. 639 double _recent_avg_pause_time_ratio; 640 641 double recent_avg_pause_time_ratio() { 642 return _recent_avg_pause_time_ratio; 643 } 644 645 // At the end of a pause we check the heap occupancy and we decide 646 // whether we will start a marking cycle during the next pause. If 647 // we decide that we want to do that, we will set this parameter to 648 // true. So, this parameter will stay true between the end of a 649 // pause and the beginning of a subsequent pause (not necessarily 650 // the next one, see the comments on the next field) when we decide 651 // that we will indeed start a marking cycle and do the initial-mark 652 // work. 653 volatile bool _initiate_conc_mark_if_possible; 654 655 // If initiate_conc_mark_if_possible() is set at the beginning of a 656 // pause, it is a suggestion that the pause should start a marking 657 // cycle by doing the initial-mark work. However, it is possible 658 // that the concurrent marking thread is still finishing up the 659 // previous marking cycle (e.g., clearing the next marking 660 // bitmap). If that is the case we cannot start a new cycle and 661 // we'll have to wait for the concurrent marking thread to finish 662 // what it is doing. In this case we will postpone the marking cycle 663 // initiation decision for the next pause. When we eventually decide 664 // to start a cycle, we will set _during_initial_mark_pause which 665 // will stay true until the end of the initial-mark pause and it's 666 // the condition that indicates that a pause is doing the 667 // initial-mark work. 668 volatile bool _during_initial_mark_pause; 669 670 bool _last_young_gc; 671 672 // This set of variables tracks the collector efficiency, in order to 673 // determine whether we should initiate a new marking. 
674 double _cur_mark_stop_world_time_ms; 675 double _mark_remark_start_sec; 676 double _mark_cleanup_start_sec; 677 double _root_region_scan_wait_time_ms; 678 679 // Update the young list target length either by setting it to the 680 // desired fixed value or by calculating it using G1's pause 681 // prediction model. If no rs_lengths parameter is passed, predict 682 // the RS lengths using the prediction model, otherwise use the 683 // given rs_lengths as the prediction. 684 void update_young_list_target_length(size_t rs_lengths = (size_t) -1); 685 686 // Calculate and return the minimum desired young list target 687 // length. This is the minimum desired young list length according 688 // to the user's inputs. 689 uint calculate_young_list_desired_min_length(uint base_min_length); 690 691 // Calculate and return the maximum desired young list target 692 // length. This is the maximum desired young list length according 693 // to the user's inputs. 694 uint calculate_young_list_desired_max_length(); 695 696 // Calculate and return the maximum young list target length that 697 // can fit into the pause time goal. The parameters are: rs_lengths 698 // represent the prediction of how large the young RSet lengths will 699 // be, base_min_length is the alreay existing number of regions in 700 // the young list, min_length and max_length are the desired min and 701 // max young list length according to the user's inputs. 702 uint calculate_young_list_target_length(size_t rs_lengths, 703 uint base_min_length, 704 uint desired_min_length, 705 uint desired_max_length); 706 707 // Check whether a given young length (young_length) fits into the 708 // given target pause time and whether the prediction for the amount 709 // of objects to be copied for the given length will fit into the 710 // given free space (expressed by base_free_regions). It is used by 711 // calculate_young_list_target_length(). 
712 bool predict_will_fit(uint young_length, double base_time_ms, 713 uint base_free_regions, double target_pause_time_ms); 714 715 // Count the number of bytes used in the CS. 716 void count_CS_bytes_used(); 717 718 public: 719 720 G1CollectorPolicy(); 721 722 virtual G1CollectorPolicy* as_g1_policy() { return this; } 723 724 virtual CollectorPolicy::Name kind() { 725 return CollectorPolicy::G1CollectorPolicyKind; 726 } 727 728 // Check the current value of the young list RSet lengths and 729 // compare it against the last prediction. If the current value is 730 // higher, recalculate the young list target length prediction. 731 void revise_young_list_target_length_if_necessary(); 732 733 size_t bytes_in_collection_set() { 734 return _bytes_in_collection_set_before_gc; 735 } 736 737 // This should be called after the heap is resized. 738 void record_new_heap_size(uint new_number_of_regions); 739 740 void init(); 741 742 // Create jstat counters for the policy. 743 virtual void initialize_gc_policy_counters(); 744 745 virtual HeapWord* mem_allocate_work(size_t size, 746 bool is_tlab, 747 bool* gc_overhead_limit_was_exceeded); 748 749 // This method controls how a collector handles one or more 750 // of its generations being fully allocated. 751 virtual HeapWord* satisfy_failed_allocation(size_t size, 752 bool is_tlab); 753 754 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; } 755 756 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; } 757 758 bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); 759 760 // Update the heuristic info to record a collection pause of the given 761 // start time, where the given number of bytes were used at the start. 762 // This may involve changing the desired size of a collection set. 763 764 void record_stop_world_start(); 765 766 void record_collection_pause_start(double start_time_sec, size_t start_used); 767 768 // Must currently be called while the world is stopped. 
769 void record_concurrent_mark_init_end(double 770 mark_init_elapsed_time_ms); 771 772 void record_root_region_scan_wait_time(double time_ms) { 773 _root_region_scan_wait_time_ms = time_ms; 774 } 775 776 void record_concurrent_mark_remark_start(); 777 void record_concurrent_mark_remark_end(); 778 779 void record_concurrent_mark_cleanup_start(); 780 void record_concurrent_mark_cleanup_end(int no_of_gc_threads); 781 void record_concurrent_mark_cleanup_completed(); 782 783 void record_concurrent_pause(); 784 void record_concurrent_pause_end(); 785 786 void record_collection_pause_end(int no_of_gc_threads); 787 void print_heap_transition(); 788 789 // Record the fact that a full collection occurred. 790 void record_full_collection_start(); 791 void record_full_collection_end(); 792 793 void record_gc_worker_start_time(int worker_i, double ms) { 794 _par_last_gc_worker_start_times_ms[worker_i] = ms; 795 } 796 797 void record_ext_root_scan_time(int worker_i, double ms) { 798 _par_last_ext_root_scan_times_ms[worker_i] = ms; 799 } 800 801 void record_satb_filtering_time(int worker_i, double ms) { 802 _par_last_satb_filtering_times_ms[worker_i] = ms; 803 } 804 805 void record_update_rs_time(int thread, double ms) { 806 _par_last_update_rs_times_ms[thread] = ms; 807 } 808 809 void record_update_rs_processed_buffers (int thread, 810 double processed_buffers) { 811 _par_last_update_rs_processed_buffers[thread] = processed_buffers; 812 } 813 814 void record_scan_rs_time(int thread, double ms) { 815 _par_last_scan_rs_times_ms[thread] = ms; 816 } 817 818 void reset_obj_copy_time(int thread) { 819 _par_last_obj_copy_times_ms[thread] = 0.0; 820 } 821 822 void reset_obj_copy_time() { 823 reset_obj_copy_time(0); 824 } 825 826 void record_obj_copy_time(int thread, double ms) { 827 _par_last_obj_copy_times_ms[thread] += ms; 828 } 829 830 void record_termination(int thread, double ms, size_t attempts) { 831 _par_last_termination_times_ms[thread] = ms; 832 
_par_last_termination_attempts[thread] = (double) attempts; 833 } 834 835 void record_gc_worker_end_time(int worker_i, double ms) { 836 _par_last_gc_worker_end_times_ms[worker_i] = ms; 837 } 838 839 void record_pause_time_ms(double ms) { 840 _last_pause_time_ms = ms; 841 } 842 843 void record_clear_ct_time(double ms) { 844 _cur_clear_ct_time_ms = ms; 845 } 846 847 void record_par_time(double ms) { 848 _cur_collection_par_time_ms = ms; 849 } 850 851 void record_code_root_fixup_time(double ms) { 852 _cur_collection_code_root_fixup_time_ms = ms; 853 } 854 855 void record_ref_proc_time(double ms) { 856 _cur_ref_proc_time_ms = ms; 857 } 858 859 void record_ref_enq_time(double ms) { 860 _cur_ref_enq_time_ms = ms; 861 } 862 863 #ifndef PRODUCT 864 void record_cc_clear_time(double ms) { 865 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms) 866 _min_clear_cc_time_ms = ms; 867 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms) 868 _max_clear_cc_time_ms = ms; 869 _cur_clear_cc_time_ms = ms; 870 _cum_clear_cc_time_ms += ms; 871 _num_cc_clears++; 872 } 873 #endif 874 875 // Record how much space we copied during a GC. This is typically 876 // called when a GC alloc region is being retired. 877 void record_bytes_copied_during_gc(size_t bytes) { 878 _bytes_copied_during_gc += bytes; 879 } 880 881 // The amount of space we copied during a GC. 882 size_t bytes_copied_during_gc() { 883 return _bytes_copied_during_gc; 884 } 885 886 // Determine whether there are candidate regions so that the 887 // next GC should be mixed. The two action strings are used 888 // in the ergo output when the method returns true or false. 889 bool next_gc_should_be_mixed(const char* true_action_str, 890 const char* false_action_str); 891 892 // Choose a new collection set. Marks the chosen regions as being 893 // "in_collection_set", and links them together. The head and number of 894 // the collection set are available via access methods. 
895 void finalize_cset(double target_pause_time_ms); 896 897 // The head of the list (via "next_in_collection_set()") representing the 898 // current collection set. 899 HeapRegion* collection_set() { return _collection_set; } 900 901 void clear_collection_set() { _collection_set = NULL; } 902 903 // Add old region "hr" to the CSet. 904 void add_old_region_to_cset(HeapRegion* hr); 905 906 // Incremental CSet Support 907 908 // The head of the incrementally built collection set. 909 HeapRegion* inc_cset_head() { return _inc_cset_head; } 910 911 // The tail of the incrementally built collection set. 912 HeapRegion* inc_set_tail() { return _inc_cset_tail; } 913 914 // Initialize incremental collection set info. 915 void start_incremental_cset_building(); 916 917 // Perform any final calculations on the incremental CSet fields 918 // before we can use them. 919 void finalize_incremental_cset_building(); 920 921 void clear_incremental_cset() { 922 _inc_cset_head = NULL; 923 _inc_cset_tail = NULL; 924 } 925 926 // Stop adding regions to the incremental collection set 927 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; } 928 929 // Add information about hr to the aggregated information for the 930 // incrementally built collection set. 931 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length); 932 933 // Update information about hr in the aggregated information for 934 // the incrementally built collection set. 935 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length); 936 937 private: 938 // Update the incremental cset information when adding a region 939 // (should not be called directly). 940 void add_region_to_incremental_cset_common(HeapRegion* hr); 941 942 public: 943 // Add hr to the LHS of the incremental collection set. 944 void add_region_to_incremental_cset_lhs(HeapRegion* hr); 945 946 // Add hr to the RHS of the incremental collection set. 
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // Flag: the next pause should try to start a concurrent marking cycle.
  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  // Flag: the current pause is also doing the initial-mark work.
  bool during_initial_mark_pause()      { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  // Notify the appropriate surv-rate group (survivor or short-lived)
  // that its age indexes have been recalculated.
  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  // True when the young list has reached its target length.
  bool is_young_list_full() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  // True when the young list may still grow beyond its target, up to
  // the max length (used e.g. under GC-locker expansion).
  bool can_expand_young_list() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  // Whether upcoming GCs are fully-young (as opposed to mixed).
  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  // Whether the young gen size is chosen ergonomically (delegated to
  // the young gen sizer).
  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  // Aggregated age table merged from the per-thread age tables.
  ageTable _survivors_age_table;

public:

  // Decide where an object should be copied to during evacuation:
  // survivor space while its age is below the tenuring threshold and it
  // still lives in a young region; otherwise tenured. "word_sz" is
  // accepted but unused in this implementation.
  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  // Only objects copied to survivor space keep having their age tracked.
  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  // Sentinel meaning "no limit on the number of regions for a purpose".
  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  // For survivors this disables further aging by zeroing the threshold.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  // Record the survivor region list (count, head, tail) produced by the
  // last collection, for later reporting/use.
  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  // Merge a worker thread's age table into the aggregated one.
  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
// Population variance computed from running sums: expands
// E[(x - mean)^2] = (sum_sq - 2*mean*sum + n*mean^2) / n.
// Precondition: n > 0 (callers track at least one measurement).
inline double variance(int n, double sum_of_squares, double sum) {
  double count = (double)n;
  double mean  = sum / count;
  return (sum_of_squares - 2.0 * mean * sum + count * mean * mean) / count;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP