/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;

// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                                                  \
private:                                                                      \
  NumberSeq _all_##name##_times_ms;                                           \
public:                                                                       \
  void record_##name##_time_ms(double ms) {                                   \
    _all_##name##_times_ms.add(ms);                                           \
  }                                                                           \
  NumberSeq* get_##name##_seq() {                                             \
    return &_all_##name##_times_ms;                                           \
  }
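
// For reference, define_num_seq(total) expands (modulo whitespace) to:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) {
//       _all_total_times_ms.add(ms);
//     }
//     NumberSeq* get_total_seq() {
//       return &_all_total_times_ms;
//     }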

class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
    define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)     // optional
  define_num_seq(parallel)       // parallel only
    define_num_seq(ext_root_scan)
    define_num_seq(mark_stack_scan)
    define_num_seq(update_rs)
    define_num_seq(scan_rs)
    define_num_seq(obj_copy)
    define_num_seq(termination)  // parallel only
    define_num_seq(parallel_other) // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)       // parallel only
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};

class G1CollectorPolicy: public CollectorPolicy {
protected:
  // The number of pauses during the execution.
  long _n_pauses;

  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  virtual size_t default_init_heap_size() {
    // Pick some reasonable default.
    return 8*M;
  }

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  bool   _satb_drain_time_set;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;  // min
  double _max_clear_cc_time_ms;  // max
  double _cur_clear_cc_time_ms;  // clearing time during current pause
  double _cum_clear_cc_time_ms;  // cumulative clearing time
  jlong  _num_cc_clears;         // number of times the card count cache has been cleared
#endif

  // Statistics for recent GC pauses.  See below for how indexed.
  TruncatedSeq* _recent_rs_scan_times_ms;

  // These exclude marking times.
  TruncatedSeq* _recent_pause_times_ms;
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _recent_CS_bytes_used_before;
  TruncatedSeq* _recent_CS_bytes_surviving;

  TruncatedSeq* _recent_rs_sizes;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary* _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double     _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  size_t _region_num_young;
  size_t _region_num_tenured;
  size_t _prev_region_num_young;
  size_t _prev_region_num_tenured;

  NumberSeq* _all_mod_union_times_ms;

  int        _aux_num;
  NumberSeq* _all_aux_times_ms;
  double*    _cur_aux_start_times_ms;
  double*    _cur_aux_times_ms;
  bool*      _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;
  double* _par_last_gc_worker_times_ms;
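
  // Note: each of the _par_last_* arrays above holds one entry per GC
  // worker thread (presumably _parallel_gc_threads entries; see the
  // record_* methods further down, which index them by worker id).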

  // indicates whether we are in full young or partially young GC mode
  bool _full_young_gcs;

  // if true, then it tries to dynamically adjust the length of the
  // young list
  bool _adaptive_young_list_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;
  size_t _prev_eden_capacity; // used for logging

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  size_t _young_list_max_length;

  size_t _young_cset_length;
  bool   _last_young_gc_full;

  unsigned _full_young_pause_num;
  unsigned _partial_young_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  size_t _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

  // <NEW PREDICTION>

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _scanned_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  TruncatedSeq* _max_conc_overhead_seq;

  bool   _using_new_ratio_calculations;
  size_t _min_desired_young_length; // as set on the command line or default calculations
  size_t _max_desired_young_length; // as set on the command line or default calculations

  size_t _recorded_young_regions;
  size_t _recorded_non_young_regions;
  size_t _recorded_region_num;

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _recorded_marked_bytes;
  size_t _recorded_young_bytes;

  size_t _predicted_pending_cards;
  size_t _predicted_cards_scanned;
  size_t _predicted_rs_lengths;
  size_t _predicted_bytes_to_copy;

  double _predicted_survival_ratio;
  double _predicted_rs_update_time_ms;
  double _predicted_rs_scan_time_ms;
  double _predicted_object_copy_time_ms;
  double _predicted_constant_other_time_ms;
  double _predicted_young_other_time_ms;
  double _predicted_non_young_other_time_ms;
  double _predicted_pause_time_ms;

  double _vtime_diff_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;

  double sigma() {
    return _sigma;
  }

  // A function that prevents us from putting too much stock in small sample
  // sets.  Returns a number between 2.0 and 1.0, depending on the number
  // of samples.  5 or more samples yields one; fewer scales linearly from
  // 2.0 at 1 sample to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }
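
  // Worked example for the prediction helpers (assuming the default
  // sigma() of 0.5, i.e. G1ConfidencePercent = 50): for a sequence with
  // decayed average davg() == 10.0, decayed std dev dsd() == 1.0 and
  // num() == 2 samples,
  //   confidence_factor(2)        == 1.0 + 0.5 * 3.0 / 2.0  == 1.75
  //   get_new_prediction(seq)     == MAX2(10.0 + 0.5 * 1.0,
  //                                       10.0 * 1.75)      == 17.5
  //   get_new_neg_prediction(seq) == 10.0 - 0.5 * 1.0       == 9.5
  // i.e. with few samples the (pessimistic) prediction is padded well
  // above the decayed average.  (get_new_prediction() is defined in the
  // public section below.)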

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

protected:
  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  bool   _within_target;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:

  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }

  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  size_t young_cset_length() {
    return _young_cset_length;
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
             get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }
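
  // Taken together, the helpers above form a simple pipeline: a predicted
  // RSet length is first converted into a number of cards to scan via one
  // of the cards-per-entry ratios, and that card count is then converted
  // into a time via a cost-per-entry sequence.  For example (illustrative
  // numbers only):
  //   rs_length == 1000, cards-per-entry ratio == 1.2
  //     => predict_young_card_num(1000)   == 1200
  //   cost per entry == 0.01 ms
  //     => predict_rs_scan_time_ms(1200)  == 12.0 ms
  // The "partially young" variants fall back to the fully-young sequences
  // until enough (2-3) samples have been collected.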

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_seq);
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

  void start_recording_regions();
  void record_cset_region_info(HeapRegion* hr, bool young);
  void record_non_young_cset_region(HeapRegion* hr);

  void set_recorded_young_regions(size_t n_regions);
  void set_recorded_young_bytes(size_t bytes);
  void set_recorded_rs_lengths(size_t rs_lengths);
  void set_predicted_bytes_to_copy(size_t bytes);

  void end_recording_regions();

  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  // </NEW PREDICTION>

public:
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }
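
  // For example (illustrative numbers): with 256M of known garbage on a
  // 1024M heap, _known_garbage_ratio == 0.25.  The ratio is consumed by
  // get_gc_eff_factor() further down.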

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

protected:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data);
  void print_par_sizes(int level, const char* str, double* data);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary(PauseSummary* stats) const;

  void print_summary(int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd(int level, const char* str, NumberSeq* seq) const;

  double avg_value(double* data);
  double max_value(double* data);
  double sum_of_values(double* data);
  double max_sum(double* data1, double* data2);

  int    _last_satb_drain_processed_buffers;
  int    _last_update_rs_processed_buffers;
  double _last_pause_time_ms;

  size_t _bytes_in_collection_set_before_gc;
  size_t _bytes_copied_during_gc;

  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // We track markings.
  int    _num_markings;
  double _mark_thread_startup_sec; // Time at startup of marking thread

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of regions in the collection set. Set from the incrementally
  // built collection set at the start of an evacuation pause.
  size_t _collection_set_size;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,   // We are actively building the collection set
    Inactive  // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of regions in the incrementally built collection set.
  // Used to set _collection_set_size at the start of an evacuation
  // pause.
  size_t _inc_cset_size;

  // Used as the index in the surviving young words structure
  // which tracks the amount of space, for each young region,
  // that survives the pause.
  size_t _inc_cset_young_index;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The number of recorded used bytes in the young regions
  // of the collection set. This is the sum of the used() bytes
  // of retired young regions in the collection set.
  size_t _inc_cset_recorded_young_bytes;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // The predicted bytes to copy for the regions in the collection
  // set (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_predicted_bytes_to_copy;

  // Info about marking.
  int _n_marks; // Sticky at 2, so we know when we've done at least 2.

  // The number of collection pauses at the end of the last mark.
  size_t _n_pauses_at_mark_end;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The average time in ms per collection pause, averaged over recent pauses.
  double recent_avg_time_for_pauses_ms();

  // The average time in ms for RS scanning, per pause, averaged
  // over recent pauses. (Note the RS scanning time for a pause
  // is itself an average of the RS scanning time for each worker
  // thread.)
  double recent_avg_time_for_rs_scan_ms();

  // The number of "recent" GCs recorded in the number sequences
  int number_of_recent_gcs();

  // The average survival ratio, computed by the total number of bytes
  // surviving / total number of bytes before collection over the last
  // several recent pauses.
  double recent_avg_survival_fraction();
  // The survival fraction of the most recent pause; if there have been no
  // pauses, returns 1.0.
  double last_survival_fraction();

  // Returns a "conservative" estimate of the recent survival rate, i.e.,
  // one that may be higher than "recent_avg_survival_fraction".
  // This is conservative in several ways:
  //   If there have been few pauses, it will assume a potential high
  //     variance, and err on the side of caution.
  //   It puts a lower bound (currently 0.1) on the value it will return.
  //   To try to detect phase changes, if the most recent pause ("latest") has a
  //     higher-than average ("avg") survival rate, it returns that rate.
  // "work" version is a utility function; young is restricted to young regions.
  double conservative_avg_survival_fraction_work(double avg,
                                                 double latest);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection, resp.,
  // over the last several recent pauses.
  // Returns the survival rate for the category in the most recent pause.
  // If there have been no pauses, returns 1.0.
  double last_survival_fraction_work(TruncatedSeq* surviving,
                                     TruncatedSeq* before);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection, resp.,
  // over the last several recent pauses.
  // Returns the average survival ratio over the last several recent pauses.
  // If there have been no pauses, returns 1.0.
  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                           TruncatedSeq* before);

  double conservative_avg_survival_fraction() {
    double avg = recent_avg_survival_fraction();
    double latest = last_survival_fraction();
    return conservative_avg_survival_fraction_work(avg, latest);
  }

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // Number of pauses between concurrent marking.
  size_t _pauses_btwn_concurrent_mark;

  size_t _n_marks_since_last_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;
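
  // In short, the two flags above form a small hand-off protocol (see also
  // decide_on_conc_mark_initiation() below): the end of one pause sets
  // _initiate_conc_mark_if_possible; the start of a later pause either
  // converts it into _during_initial_mark_pause (if the concurrent mark
  // thread is idle) or leaves it set and postpones the decision.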

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  size_t calculate_young_list_desired_min_length(size_t base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  size_t calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represent the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, min_length and max_length are the desired min and
  // max young list length according to the user's inputs.
  size_t calculate_young_list_target_length(size_t rs_lengths,
                                            size_t base_min_length,
                                            size_t desired_min_length,
                                            size_t desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(size_t young_length, double base_time_ms,
                        size_t base_free_regions, double target_pause_time_ms);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

  // This should be called after the heap is resized.
  void record_new_heap_size(size_t new_number_of_regions);

protected:

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  // Together these do the base cleanup-recording work. Subclasses might
  // want to put something between them.
  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                                size_t max_live_bytes);
  void record_concurrent_mark_cleanup_end_work2();

  void update_young_list_size_using_newratio(size_t number_of_heap_regions);

public:

  virtual void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  virtual void record_stop_world_start();

  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  virtual void record_concurrent_mark_remark_start();
  virtual void record_concurrent_mark_remark_end();

  virtual void record_concurrent_mark_cleanup_start();
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_concurrent_mark_cleanup_completed();

  virtual void record_concurrent_pause();
  virtual void record_concurrent_pause_end();

  virtual void record_collection_pause_end();
  void print_heap_transition();

  // Record the fact that a full collection occurred.
  virtual void record_full_collection_start();
  virtual void record_full_collection_end();

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set = true;
  }

  void record_satb_drain_processed_buffers(int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support
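  //
  // The expected lifecycle, as suggested by the API (a sketch; the exact
  // call sites live in the .cpp files and are not confirmed here):
  //
  //   start_incremental_cset_building();        // begin a new cycle
  //   add_region_to_incremental_cset_rhs(hr);   // as young regions are added
  //   ...
  //   stop_incremental_cset_building();         // freeze the set for the pause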

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle();

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // note start of mark thread
  void note_start_of_mark_thread();

  // The marked bytes of region "r" have changed; reclassify its desirability
  // for marking. Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  void update_region_num(bool young);

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }

  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }
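
  // get_gc_eff_factor() maps _known_garbage_ratio in [0.0, 1.0) onto a
  // multiplier in [1.0, 10.0); e.g. a ratio of 0.5 yields
  // 0.25 * 9.0 + 1.0 == 3.25.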

  //
  // Survivor regions policy.
  //
protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // For reporting purposes.
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
  evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
    if (age < _tenuring_threshold && src_region->is_young()) {
      return GCAllocForSurvived;
    } else {
      return GCAllocForTenured;
    }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

};

// This encapsulates a particular strategy for a g1 Collector.
//
//      Start a concurrent mark when our heap size is n bytes
//            greater than our heap size was at the last concurrent
//            mark.  Where n is a function of the CMSTriggerRatio
//            and the MinHeapFreeRatio.
//
//      Start a g1 collection pause when we have allocated the
//            average number of bytes currently being freed in
//            a collection, but only if it is at least one region
//            full
//
//      Resize Heap based on desired
//            allocation space, where desired allocation space is
//            a function of survival rate and desired future size.
//
//      Choose collection set by first picking all older regions
//            which have a survival rate which beats our projected young
//            survival rate.  Then fill out the number of needed regions
//            with young regions.

class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  CollectionSetChooser* _collectionSetChooser;

  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
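
// Algebraically this is the usual E[X^2] - E[X]^2 form:
//   (sum_of_squares - 2*avg*sum + n*avg^2) / n
//     == sum_of_squares/n - 2*avg^2 + avg^2
//     == sum_of_squares/n - avg^2
// Quick check with the (made-up) samples {1.0, 3.0}: n == 2, sum == 4.0,
// sum_of_squares == 10.0, avg == 2.0, so variance == 10.0/2 - 4.0 == 1.0.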

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP