/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;

// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                                                  \
private:                                                                      \
  NumberSeq _all_##name##_times_ms;                                           \
public:                                                                       \
  void record_##name##_time_ms(double ms) {                                   \
    _all_##name##_times_ms.add(ms);                                           \
  }                                                                           \
  NumberSeq* get_##name##_seq() {                                             \
    return &_all_##name##_times_ms;                                           \
  }
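// As an illustration (a sketch of the expansion, not something the compiler
// sees verbatim), define_num_seq(total) generates:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) {
//       _all_total_times_ms.add(ms);
//     }
//     NumberSeq* get_total_seq() {
//       return &_all_total_times_ms;
//     }
//
// i.e. each named phase gets a NumberSeq field plus a record_<name>_time_ms()
// mutator and a get_<name>_seq() accessor in the enclosing class.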
class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)      // optional
  define_num_seq(parallel)        // parallel only
  define_num_seq(ext_root_scan)
  define_num_seq(mark_stack_scan)
  define_num_seq(update_rs)
  define_num_seq(scan_rs)
  define_num_seq(obj_copy)
  define_num_seq(termination)     // parallel only
  define_num_seq(parallel_other)  // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)        // parallel only
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};

class G1CollectorPolicy: public CollectorPolicy {
protected:
  // The number of pauses during the execution.
  long _n_pauses;

  // Either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise.
  int _parallel_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  virtual size_t default_init_heap_size() {
    // Pick some reasonable default.
    return 8*M;
  }

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  bool   _satb_drain_time_set;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;   // min
  double _max_clear_cc_time_ms;   // max
  double _cur_clear_cc_time_ms;   // clearing time during current pause
  double _cum_clear_cc_time_ms;   // cumulative clearing time
  jlong  _num_cc_clears;          // number of times the card count cache has been cleared
#endif

  // Statistics for recent GC pauses.  See below for how indexed.
  TruncatedSeq* _recent_rs_scan_times_ms;

  // These exclude marking times.
  TruncatedSeq* _recent_pause_times_ms;
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _recent_CS_bytes_used_before;
  TruncatedSeq* _recent_CS_bytes_surviving;

  TruncatedSeq* _recent_rs_sizes;

  TruncatedSeq* _concurrent_mark_init_times_ms;
  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary* _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double     _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  size_t _region_num_young;
  size_t _region_num_tenured;
  size_t _prev_region_num_young;
  size_t _prev_region_num_tenured;

  NumberSeq* _all_mod_union_times_ms;

  int        _aux_num;
  NumberSeq* _all_aux_times_ms;
  double*    _cur_aux_start_times_ms;
  double*    _cur_aux_times_ms;
  bool*      _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;
  double* _par_last_gc_worker_times_ms;

  // Indicates whether we are in full young or partially young GC mode.
  bool _full_young_gcs;

  // If true, then it tries to dynamically adjust the length of the
  // young list.
  bool _adaptive_young_list_length;
  size_t _young_list_min_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active.  This should be >= _young_list_target_length.
  size_t _young_list_max_length;
  size_t _young_cset_length;
  bool   _last_young_gc_full;

  unsigned _full_young_pause_num;
  unsigned _partial_young_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  bool during_marking() {
    return _during_marking;
  }

  // <NEW PREDICTION>

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _scanned_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  TruncatedSeq* _max_conc_overhead_seq;

  size_t _recorded_young_regions;
  size_t _recorded_non_young_regions;
  size_t _recorded_region_num;

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _recorded_marked_bytes;
  size_t _recorded_young_bytes;

  size_t _predicted_pending_cards;
  size_t _predicted_cards_scanned;
  size_t _predicted_rs_lengths;
  size_t _predicted_bytes_to_copy;

  double _predicted_survival_ratio;
  double _predicted_rs_update_time_ms;
  double _predicted_rs_scan_time_ms;
  double _predicted_object_copy_time_ms;
  double _predicted_constant_other_time_ms;
  double _predicted_young_other_time_ms;
  double _predicted_non_young_other_time_ms;
  double _predicted_pause_time_ms;

  double _vtime_diff_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;

  double sigma() {
    return _sigma;
  }

  // A function that prevents us putting too much stock in small sample
  // sets.  Returns 1.0 for 5 or more samples; with fewer samples the
  // factor grows linearly (scaled by sigma()), reaching 1.0 + 2.0 * sigma()
  // at a single sample.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }
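  // Worked example with illustrative values (not from any particular run):
  // with sigma() == 0.5 and samples == 2, confidence_factor() returns
  // 1.0 + 0.5 * (5 - 2) / 2.0 == 1.75; with samples >= 5 it is exactly 1.0.
  // Similarly, for a sequence with davg() == 10.0 and dsd() == 2.0,
  // get_new_neg_prediction() returns 10.0 - 0.5 * 2.0 == 9.0, i.e. an
  // estimate one scaled standard deviation below the decaying average.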
#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

protected:
  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  bool   _within_target;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:

  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }

  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
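  // Illustrative numbers (hypothetical): with davg() == 10.0 ms,
  // dsd() == 2.0 ms, sigma() == 0.5 and num() == 5, the prediction is
  // MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.0) == 11.0 ms.  With only num() == 2
  // samples, confidence_factor() == 1.75, so the prediction becomes
  // MAX2(11.0, 17.5) == 17.5 ms: sparse data yields deliberately
  // pessimistic estimates.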
  size_t young_cset_length() {
    return _young_cset_length;
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }
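  // For example (hypothetical values): with max_pending_card_num() == 10000
  // and a predicted diff of 1500, the prediction is 10000 - 1500 == 8500
  // pending cards; a diff larger than the maximum leaves the prediction at
  // the maximum itself.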
  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
  }
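  // Example with made-up costs: copying 1M bytes at a predicted rate of
  // 0.000001 ms/byte costs 1.0 ms outside a marking window.  Inside a
  // marking window, while _cost_per_byte_ms_during_cm_seq still has fewer
  // than 3 samples, the same copy is charged 1.1 * 1.0 == 1.1 ms, a fixed
  // 10% penalty that applies until enough during-marking observations
  // accumulate.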
  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return
      (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return
      (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

  // for use by: calculate_young_list_target_length(rs_length)
  bool predict_will_fit(size_t young_region_num,
                        double base_time_ms,
                        size_t init_free_regions,
                        double target_pause_time_ms);

  void start_recording_regions();
  void record_cset_region_info(HeapRegion* hr, bool young);
  void record_non_young_cset_region(HeapRegion* hr);

  void set_recorded_young_regions(size_t n_regions);
  void set_recorded_young_bytes(size_t bytes);
  void set_recorded_rs_lengths(size_t rs_lengths);
  void set_predicted_bytes_to_copy(size_t bytes);

  void end_recording_regions();

  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  // </NEW PREDICTION>

public:
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_init_time_ms() {
    return get_new_prediction(_concurrent_mark_init_times_ms);
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at young-gen
  // age "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

protected:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data);
  void print_par_sizes(int level, const char* str, double* data);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary(PauseSummary* stats) const;

  void print_summary(int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd(int level, const char* str, NumberSeq* seq) const;

  double avg_value(double* data);
  double max_value(double* data);
  double sum_of_values(double* data);
  double max_sum(double* data1, double* data2);

  int    _last_satb_drain_processed_buffers;
  int    _last_update_rs_processed_buffers;
  double _last_pause_time_ms;

  size_t _bytes_in_collection_set_before_gc;
  size_t _bytes_copied_during_gc;

  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // We track markings.
  int    _num_markings;
  double _mark_thread_startup_sec;  // Time at startup of marking thread

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of regions in the collection set. Set from the incrementally
  // built collection set at the start of an evacuation pause.
  size_t _collection_set_size;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of regions in the incrementally built collection set.
  // Used to set _collection_set_size at the start of an evacuation
  // pause.
  size_t _inc_cset_size;

  // Used as the index in the surviving young words structure
  // which tracks the amount of space, for each young region,
  // that survives the pause.
  size_t _inc_cset_young_index;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set.
  HeapWord* _inc_cset_max_finger;

  // The number of recorded used bytes in the young regions
  // of the collection set. This is the sum of the used() bytes
  // of retired young regions in the collection set.
  size_t _inc_cset_recorded_young_bytes;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // The predicted bytes to copy for the regions in the collection
  // set (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_predicted_bytes_to_copy;

  // Info about marking.
  int _n_marks;  // Sticky at 2, so we know when we've done at least 2.

  // The number of collection pauses at the end of the last mark.
  size_t _n_pauses_at_mark_end;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The average time in ms per collection pause, averaged over recent pauses.
  double recent_avg_time_for_pauses_ms();

  // The average time in ms for RS scanning, per pause, averaged
  // over recent pauses. (Note the RS scanning time for a pause
  // is itself an average of the RS scanning time for each worker
  // thread.)
  double recent_avg_time_for_rs_scan_ms();

  // The number of "recent" GCs recorded in the number sequences.
  int number_of_recent_gcs();

  // The average survival ratio, computed by the total number of bytes
  // surviving / total number of bytes before collection over the last
  // several recent pauses.
  double recent_avg_survival_fraction();
  // The survival fraction of the most recent pause; if there have been no
  // pauses, returns 1.0.
  double last_survival_fraction();

  // Returns a "conservative" estimate of the recent survival rate, i.e.,
  // one that may be higher than "recent_avg_survival_fraction".
  // This is conservative in several ways:
  //   If there have been few pauses, it will assume a potential high
  //     variance, and err on the side of caution.
  //   It puts a lower bound (currently 0.1) on the value it will return.
  //   To try to detect phase changes, if the most recent pause ("latest") has
  //     a higher-than-average ("avg") survival rate, it returns that rate.
  // The "work" version is a utility function; "young" is restricted to
  // young regions.
  double conservative_avg_survival_fraction_work(double avg,
                                                 double latest);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection, resp.,
  // over the last several recent pauses.
  // Returns the survival rate for the category in the most recent pause.
  // If there have been no pauses, returns 1.0.
  double last_survival_fraction_work(TruncatedSeq* surviving,
                                     TruncatedSeq* before);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection, resp.,
  // over the last several recent pauses.
  // Returns the average survival ratio over the last several recent pauses.
  // If there have been no pauses, returns 1.0.
  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                           TruncatedSeq* before);

  double conservative_avg_survival_fraction() {
    double avg = recent_avg_survival_fraction();
    double latest = last_survival_fraction();
    return conservative_avg_survival_fraction_work(avg, latest);
  }
  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // Number of pauses between concurrent marking cycles.
  size_t _pauses_btwn_concurrent_mark;

  size_t _n_marks_since_last_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking cycle.
  double _cur_mark_stop_world_time_ms;
  double _mark_init_start_sec;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  void calculate_young_list_min_length();
  void calculate_young_list_target_length();
  void calculate_young_list_target_length(size_t rs_lengths);
public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  void check_prediction_validity();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

protected:

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  // Together these do the base cleanup-recording work.  Subclasses might
  // want to put something between them.
  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                                size_t max_live_bytes);
  void record_concurrent_mark_cleanup_end_work2();

public:

  virtual void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  virtual void record_stop_world_start();

  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);

  // Must currently be called while the world is stopped.
  virtual void record_concurrent_mark_init_start();
  virtual void record_concurrent_mark_init_end();
  void record_concurrent_mark_init_end_pre(double
                                           mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  virtual void record_concurrent_mark_remark_start();
  virtual void record_concurrent_mark_remark_end();

  virtual void record_concurrent_mark_cleanup_start();
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_concurrent_mark_cleanup_completed();

  virtual void record_concurrent_pause();
  virtual void record_concurrent_pause_end();

  virtual void record_collection_pause_end();
  void print_heap_transition();

  // Record the fact that a full collection occurred.
  virtual void record_full_collection_start();
  virtual void record_full_collection_end();
  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set   = true;
  }

  void record_satb_drain_processed_buffers(int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;
  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle();

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead has
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Note the start of the mark thread.
  void note_start_of_mark_thread();

  // The marked bytes of region "r" have changed; reclassify its desirability
  // for marking.  Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_target_length = _young_list_target_length;
    if (G1FixedEdenSize) {
      young_list_target_length -= _max_survivor_regions;
    }
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_max_length = _young_list_max_length;
    if (G1FixedEdenSize) {
      young_list_max_length -= _max_survivor_regions;
    }
    return young_list_length < young_list_max_length;
  }

  void update_region_num(bool young);

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }

  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }
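  // Worked example (illustrative): with _known_garbage_ratio == 0.5 the
  // factor is 0.5 * 0.5 * 9.0 + 1.0 == 3.25; a ratio near 0.0 gives a
  // factor near 1.0, and a ratio approaching 1.0 approaches the 10.0
  // bound enforced by the guarantee above.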
  //
  // Survivor regions policy.
  //
protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // For reporting purposes.
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
    if (age < _tenuring_threshold && src_region->is_young()) {
      return GCAllocForSurvived;
    } else {
      return GCAllocForTenured;
    }
  }
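  // For instance (hypothetical threshold): with _tenuring_threshold == 2,
  // an age-1 object copied out of a young region goes to a survivor region
  // (GCAllocForSurvived), while an age-2 object, or any object coming from
  // a non-young region, is tenured (GCAllocForTenured).  A threshold of 0
  // tenures everything.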
  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // Called when the limit on regions allocated for a particular purpose
  // has been reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void calculate_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void calculate_survivors_policy();

};

// This encapsulates a particular strategy for a g1 Collector.
//
//   Start a concurrent mark when our heap size is n bytes
//     greater than our heap size was at the last concurrent
//     mark.  Where n is a function of the CMSTriggerRatio
//     and the MinHeapFreeRatio.
//
//   Start a g1 collection pause when we have allocated the
//     average number of bytes currently being freed in
//     a collection, but only if it is at least one region
//     full.
//
//   Resize the heap based on desired allocation space,
//     where desired allocation space is a function of
//     survival rate and desired future size.
//
//   Choose the collection set by first picking all older regions
//     which have a survival rate which beats our projected young
//     survival rate.  Then fill out the number of needed regions
//     with young regions.

class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  CollectionSetChooser* _collectionSetChooser;
  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
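// Quick sanity check of the formula (illustrative): for the measurements
// {1.0, 2.0, 3.0} we have n == 3, sum == 6.0, sum_of_squares == 14.0 and
// avg == 2.0, so variance() returns
// (14.0 - 2.0 * 2.0 * 6.0 + 3.0 * 2.0 * 2.0) / 3.0 == (14 - 24 + 12) / 3.0,
// i.e. 2/3, the population variance of the sequence.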
// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP