/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;

// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                                     \
private:                                                         \
  NumberSeq _all_##name##_times_ms;                              \
public:                                                          \
  void record_##name##_time_ms(double ms) {                      \
    _all_##name##_times_ms.add(ms);                              \
  }                                                              \
  NumberSeq* get_##name##_seq() {                                \
    return &_all_##name##_times_ms;                              \
  }

class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)      // optional
  define_num_seq(parallel)        // parallel only
  define_num_seq(ext_root_scan)
  define_num_seq(mark_stack_scan)
  define_num_seq(update_rs)
  define_num_seq(scan_rs)
  define_num_seq(obj_copy)
  define_num_seq(termination)     // parallel only
  define_num_seq(parallel_other)  // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)        // parallel only
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};
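
// Illustrative note (not part of the declarations above): define_num_seq is
// plain token pasting, so, for example, define_num_seq(total) in PauseSummary
// expands to roughly the following members:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) { _all_total_times_ms.add(ms); }
//     NumberSeq* get_total_seq()           { return &_all_total_times_ms; }
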
class G1CollectorPolicy: public CollectorPolicy {
protected:
  // The number of pauses during the execution.
  long _n_pauses;

  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  virtual size_t default_init_heap_size() {
    // Pick some reasonable default.
    return 8*M;
  }

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  bool   _satb_drain_time_set;
  double _cur_ref_proc_time_ms;
  double _cur_ref_enq_time_ms;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;         // min
  double _max_clear_cc_time_ms;         // max
  double _cur_clear_cc_time_ms;         // clearing time during current pause
  double _cum_clear_cc_time_ms;         // cumulative clearing time
  jlong  _num_cc_clears;                // number of times the card count cache has been cleared
#endif

  // Statistics for recent GC pauses.  See below for how indexed.
  TruncatedSeq* _recent_rs_scan_times_ms;

  // These exclude marking times.
  TruncatedSeq* _recent_pause_times_ms;
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _recent_CS_bytes_used_before;
  TruncatedSeq* _recent_CS_bytes_surviving;

  TruncatedSeq* _recent_rs_sizes;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary* _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  size_t _region_num_young;
  size_t _region_num_tenured;
  size_t _prev_region_num_young;
  size_t _prev_region_num_tenured;

  NumberSeq* _all_mod_union_times_ms;

  int        _aux_num;
  NumberSeq* _all_aux_times_ms;
  double*    _cur_aux_start_times_ms;
  double*    _cur_aux_times_ms;
  bool*      _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;
  double* _par_last_gc_worker_times_ms;

  // indicates whether we are in full young or partially young GC mode
  bool _full_young_gcs;

  // if true, then it tries to dynamically adjust the length of the
  // young list
  bool _adaptive_young_list_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;
  size_t _prev_eden_capacity; // used for logging

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  size_t _young_list_max_length;

  size_t _young_cset_length;
  bool   _last_young_gc_full;

  unsigned _full_young_pause_num;
  unsigned _partial_young_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  size_t _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

  // <NEW PREDICTION>

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _scanned_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  TruncatedSeq* _max_conc_overhead_seq;

  bool   _using_new_ratio_calculations;
  size_t _min_desired_young_length; // as set on the command line or default calculations
  size_t _max_desired_young_length; // as set on the command line or default calculations

  size_t _recorded_young_regions;
  size_t _recorded_non_young_regions;
  size_t _recorded_region_num;

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _recorded_marked_bytes;
  size_t _recorded_young_bytes;

  size_t _predicted_pending_cards;
  size_t _predicted_cards_scanned;
  size_t _predicted_rs_lengths;
  size_t _predicted_bytes_to_copy;

  double _predicted_survival_ratio;
  double _predicted_rs_update_time_ms;
  double _predicted_rs_scan_time_ms;
  double _predicted_object_copy_time_ms;
  double _predicted_constant_other_time_ms;
  double _predicted_young_other_time_ms;
  double _predicted_non_young_other_time_ms;
  double _predicted_pause_time_ms;

  double _vtime_diff_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;

  double sigma() {
    return _sigma;
  }

  // A function that prevents us putting too much stock in small sample
  // sets.  Returns a number between 1.0 and 2.0, depending on the number
  // of samples: 5 or more samples yields 1.0; fewer scales linearly from
  // 2.0 at 1 sample down to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
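
  // Worked example (illustrative only): with sigma() == 0.5 and 3 samples,
  // confidence_factor(3) == 1.0 + 0.5 * (5 - 3) / 2.0 == 1.5.  For a sequence
  // with davg() == 10.0 ms, dsd() == 2.0 ms and num() == 3, get_new_prediction()
  // (defined below) returns MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.5) == 15.0 ms,
  // i.e. sparse data pushes the prediction towards the pessimistic side.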

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

protected:
  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  bool   _within_target;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:

  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }

  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  size_t young_cset_length() {
    return _young_cset_length;
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num *
        get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

  void start_recording_regions();
  void record_cset_region_info(HeapRegion* hr, bool young);
  void record_non_young_cset_region(HeapRegion* hr);

  void set_recorded_young_regions(size_t n_regions);
  void set_recorded_young_bytes(size_t bytes);
  void set_recorded_rs_lengths(size_t rs_lengths);
  void set_predicted_bytes_to_copy(size_t bytes);

  void end_recording_regions();

  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  // </NEW PREDICTION>

  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

protected:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data);
  void print_par_sizes(int level, const char* str, double* data);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary (PauseSummary* stats) const;

  void print_summary (int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd (int level, const char* str, NumberSeq* seq) const;

  double avg_value (double* data);
  double max_value (double* data);
  double sum_of_values (double* data);
  double max_sum (double* data1, double* data2);

  int    _last_satb_drain_processed_buffers;
  int    _last_update_rs_processed_buffers;
  double _last_pause_time_ms;

  size_t _bytes_in_collection_set_before_gc;
  size_t _bytes_copied_during_gc;

  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // We track markings.
  int    _num_markings;
  double _mark_thread_startup_sec;       // Time at startup of marking thread

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.  Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of regions in the collection set.  Set from the incrementally
  // built collection set at the start of an evacuation pause.
  size_t _collection_set_size;

  // The number of bytes in the collection set before the pause.  Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of regions in the incrementally built collection set.
  // Used to set _collection_set_size at the start of an evacuation
  // pause.
  size_t _inc_cset_size;

  // Used as the index in the surviving young words structure
  // which tracks the amount of space, for each young region,
  // that survives the pause.
  size_t _inc_cset_young_index;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The number of recorded used bytes in the young regions
  // of the collection set. This is the sum of the used() bytes
  // of retired young regions in the collection set.
  size_t _inc_cset_recorded_young_bytes;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // The predicted bytes to copy for the regions in the collection
  // set (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_predicted_bytes_to_copy;

  // Info about marking.
  int _n_marks; // Sticky at 2, so we know when we've done at least 2.

  // The number of collection pauses at the end of the last mark.
  size_t _n_pauses_at_mark_end;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The average time in ms per collection pause, averaged over recent pauses.
  double recent_avg_time_for_pauses_ms();

  // The average time in ms for RS scanning, per pause, averaged
  // over recent pauses. (Note the RS scanning time for a pause
  // is itself an average of the RS scanning time for each worker
  // thread.)
  double recent_avg_time_for_rs_scan_ms();

  // The number of "recent" GCs recorded in the number sequences
  int number_of_recent_gcs();

  // The average survival ratio, computed by the total number of bytes
  // surviving / total number of bytes before collection over the last
  // several recent pauses.
  double recent_avg_survival_fraction();
  // The survival fraction of the most recent pause; if there have been no
  // pauses, returns 1.0.
  double last_survival_fraction();

  // Returns a "conservative" estimate of the recent survival rate, i.e.,
  // one that may be higher than "recent_avg_survival_fraction".
  // This is conservative in several ways:
  //   If there have been few pauses, it will assume a potential high
  //     variance, and err on the side of caution.
  //   It puts a lower bound (currently 0.1) on the value it will return.
  //   To try to detect phase changes, if the most recent pause ("latest") has a
  //     higher-than-average ("avg") survival rate, it returns that rate.
  // "work" version is a utility function; young is restricted to young regions.
  double conservative_avg_survival_fraction_work(double avg,
                                                 double latest);
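
  // Illustrative sketch only (the actual definition lives in the matching
  // .cpp file). Based on the comment above, the result has roughly this shape:
  //
  //   double result = MAX2(avg, latest); // phase-change check: trust a higher recent rate
  //   // ... inflate further when only a few pauses have been recorded ...
  //   return MAX2(result, 0.1);          // documented lower bound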

  // The arguments are the two sequences that keep track of the number of bytes
  //   surviving and the total number of bytes before collection, resp.,
  //   over the last several recent pauses
  // Returns the survival rate for the category in the most recent pause.
  // If there have been no pauses, returns 1.0.
  double last_survival_fraction_work(TruncatedSeq* surviving,
                                     TruncatedSeq* before);

  // The arguments are the two sequences that keep track of the number of bytes
  //   surviving and the total number of bytes before collection, resp.,
  //   over the last several recent pauses
  // Returns the average survival ratio over the last several recent pauses
  // If there have been no pauses, returns 1.0
  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                           TruncatedSeq* before);

  double conservative_avg_survival_fraction() {
    double avg = recent_avg_survival_fraction();
    double latest = last_survival_fraction();
    return conservative_avg_survival_fraction_work(avg, latest);
  }

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // Number of pauses between concurrent marking.
  size_t _pauses_btwn_concurrent_mark;

  size_t _n_marks_since_last_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
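
  // Rough outline (illustrative only, not the actual implementation): the
  // helpers declared below combine along these lines
  //
  //   size_t min_length = calculate_young_list_desired_min_length(base_min_length);
  //   size_t max_length = calculate_young_list_desired_max_length();
  //   size_t target     = calculate_young_list_target_length(rs_lengths,
  //                                                          base_min_length,
  //                                                          min_length,
  //                                                          max_length);
  //
  // where calculate_young_list_target_length() searches for the largest young
  // length for which predict_will_fit() still reports that the predicted pause
  // time and copied bytes fit within the pause time goal and free space.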

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  size_t calculate_young_list_desired_min_length(size_t base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  size_t calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represent the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, min_length and max_length are the desired min and
  // max young list length according to the user's inputs.
  size_t calculate_young_list_target_length(size_t rs_lengths,
                                            size_t base_min_length,
                                            size_t desired_min_length,
                                            size_t desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions).  It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(size_t young_length, double base_time_ms,
                        size_t base_free_regions, double target_pause_time_ms);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

  // This should be called after the heap is resized.
  void record_new_heap_size(size_t new_number_of_regions);

protected:

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  // Together these do the base cleanup-recording work.  Subclasses might
  // want to put something between them.
  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                                size_t max_live_bytes);
  void record_concurrent_mark_cleanup_end_work2();

  void update_young_list_size_using_newratio(size_t number_of_heap_regions);

public:

  virtual void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  virtual void record_stop_world_start();

  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  virtual void record_concurrent_mark_remark_start();
  virtual void record_concurrent_mark_remark_end();

  virtual void record_concurrent_mark_cleanup_start();
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_concurrent_mark_cleanup_completed();

  virtual void record_concurrent_pause();
  virtual void record_concurrent_pause_end();

  virtual void record_collection_pause_end();
  void print_heap_transition();

  // Record the fact that a full collection occurred.
  virtual void record_full_collection_start();
  virtual void record_full_collection_end();

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set    = true;
  }

  void record_satb_drain_processed_buffers(int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

  void record_ref_proc_time(double ms) {
    _cur_ref_proc_time_ms = ms;
  }

  void record_ref_enq_time(double ms) {
    _cur_ref_enq_time_ms = ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);
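
  // Illustrative sketch only (the real bodies are in the matching .cpp file):
  // based on the field comments above, adding a region updates the aggregate
  // incremental CSet statistics along these lines:
  //
  //   _inc_cset_recorded_rs_lengths       += rs_length;
  //   _inc_cset_predicted_elapsed_time_ms += predict_region_elapsed_time_ms(hr, true);
  //   _inc_cset_predicted_bytes_to_copy   += predict_bytes_to_copy(hr);
  //   _inc_cset_size                      += 1;
  //
  // and remove_from_incremental_cset_info() subtracts the same quantities
  // when a region leaves the young list.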

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();
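
  // Illustrative sketch only (the actual body is in the matching .cpp file):
  // the comments above and on _initiate_conc_mark_if_possible suggest a flow
  // roughly like
  //
  //   if (initiate_conc_mark_if_possible()) {
  //     if (/* concurrent marking thread has finished the previous cycle */) {
  //       set_during_initial_mark_pause();        // this pause does initial-mark work
  //       clear_initiate_conc_mark_if_possible(); // the request has been honoured
  //     } else {
  //       // leave the flag set and postpone the decision to a later pause
  //     }
  //   }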

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // note start of mark thread
  void note_start_of_mark_thread();

  // The marked bytes of the "r" has changed; reclassify its desirability
  // for marking.  Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  void update_region_num(bool young);

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }

  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }

  //
  // Survivor regions policy.
  //
protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // For reporting purposes.
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t      regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table)
  {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

};

// This encapsulates a particular strategy for a g1 Collector.
//
//      Start a concurrent mark when our heap size is n bytes
//            greater than our heap size was at the last concurrent
//            mark.  Where n is a function of the CMSTriggerRatio
//            and the MinHeapFreeRatio.
//
//      Start a g1 collection pause when we have allocated the
//            average number of bytes currently being freed in
//            a collection, but only if it is at least one region
//            full
//
//      Resize Heap based on desired
//            allocation space, where desired allocation space is
//            a function of survival rate and desired future to size.
//
//      Choose collection set by first picking all older regions
//            which have a survival rate which beats our projected young
//            survival rate.  Then fill out the number of needed regions
//            with young regions.

class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  CollectionSetChooser* _collectionSetChooser;

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP