/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;

// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                        \
private:                                            \
  NumberSeq _all_##name##_times_ms;                 \
public:                                             \
  void record_##name##_time_ms(double ms) {         \
    _all_##name##_times_ms.add(ms);                 \
  }                                                 \
  NumberSeq* get_##name##_seq() {                   \
    return &_all_##name##_times_ms;                 \
  }

class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)      // optional
  define_num_seq(parallel)        // parallel only
  define_num_seq(ext_root_scan)
  define_num_seq(mark_stack_scan)
  define_num_seq(update_rs)
  define_num_seq(scan_rs)
  define_num_seq(obj_copy)
  define_num_seq(termination)     // parallel only
  define_num_seq(parallel_other)  // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)        // parallel only
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};

class G1CollectorPolicy: public CollectorPolicy {
protected:
  // The number of pauses during the execution.
  long _n_pauses;

  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  virtual size_t default_init_heap_size() {
    // Pick some reasonable default.
    return 8*M;
  }

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  bool   _satb_drain_time_set;
  double _cur_ref_proc_time_ms;
  double _cur_ref_enq_time_ms;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;  // min
  double _max_clear_cc_time_ms;  // max
  double _cur_clear_cc_time_ms;  // clearing time during current pause
  double _cum_clear_cc_time_ms;  // cumulative clearing time
  jlong  _num_cc_clears;         // number of times the card count cache has been cleared
#endif

  // Statistics for recent GC pauses.  See below for how indexed.
  TruncatedSeq* _recent_rs_scan_times_ms;

  // These exclude marking times.
  TruncatedSeq* _recent_pause_times_ms;
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _recent_CS_bytes_used_before;
  TruncatedSeq* _recent_CS_bytes_surviving;

  TruncatedSeq* _recent_rs_sizes;

  TruncatedSeq* _concurrent_mark_init_times_ms;
  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary* _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double     _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  size_t _region_num_young;
  size_t _region_num_tenured;
  size_t _prev_region_num_young;
  size_t _prev_region_num_tenured;

  NumberSeq* _all_mod_union_times_ms;

  int        _aux_num;
  NumberSeq* _all_aux_times_ms;
  double*    _cur_aux_start_times_ms;
  double*    _cur_aux_times_ms;
  bool*      _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;
  double* _par_last_gc_worker_times_ms;

  // indicates that we are in young GC mode
  bool _in_young_gc_mode;

  // indicates whether we are in full young or partially young GC mode
  bool _full_young_gcs;

  // if true, then it tries to dynamically adjust the length of the
  // young list
  bool _adaptive_young_list_length;
  size_t _young_list_min_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active.
  // This should be >= _young_list_target_length.
  size_t _young_list_max_length;

  size_t _young_cset_length;
  bool   _last_young_gc_full;

  unsigned _full_young_pause_num;
  unsigned _partial_young_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  bool during_marking() {
    return _during_marking;
  }

  // <NEW PREDICTION>

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _scanned_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  TruncatedSeq* _max_conc_overhead_seq;

  size_t _recorded_young_regions;
  size_t _recorded_non_young_regions;
  size_t _recorded_region_num;

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _recorded_marked_bytes;
  size_t _recorded_young_bytes;

  size_t _predicted_pending_cards;
  size_t _predicted_cards_scanned;
  size_t _predicted_rs_lengths;
  size_t _predicted_bytes_to_copy;

  double _predicted_survival_ratio;
  double _predicted_rs_update_time_ms;
  double _predicted_rs_scan_time_ms;
  double _predicted_object_copy_time_ms;
  double _predicted_constant_other_time_ms;
  double _predicted_young_other_time_ms;
  double _predicted_non_young_other_time_ms;
  double _predicted_pause_time_ms;

  double _vtime_diff_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;

  double sigma() {
    return _sigma;
  }

  // A function that prevents us putting too much stock in small sample
  // sets.  Returns a multiplier between 1.0 and 1.0 + 2 * sigma(),
  // depending on the number of samples.  5 or more samples yields 1.0;
  // fewer samples scale the extra padding linearly, from
  // 1.0 + 2 * sigma() at 1 sample down to 1.0 at 5.
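  // For example (illustrative only, assuming the default G1ConfidencePercent
  // of 50, i.e. sigma() == 0.5): 1 sample -> 2.0, 3 samples -> 1.5, and
  // 5 or more samples -> 1.0.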
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

protected:
  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  bool   _within_target;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:

  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }

  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  size_t young_cset_length() {
    return _young_cset_length;
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num *
        get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

  // for use by: calculate_young_list_target_length(rs_length)
  bool predict_will_fit(size_t young_region_num,
                        double base_time_ms,
                        size_t init_free_regions,
                        double target_pause_time_ms);

  void start_recording_regions();
  void record_cset_region_info(HeapRegion* hr, bool young);
  void record_non_young_cset_region(HeapRegion* hr);

  void set_recorded_young_regions(size_t n_regions);
  void set_recorded_young_bytes(size_t bytes);
  void set_recorded_rs_lengths(size_t rs_lengths);
  void set_predicted_bytes_to_copy(size_t bytes);

  void end_recording_regions();

  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  // </NEW PREDICTION>

public:
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;

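    // Recompute the known-garbage ratio against the current heap capacity.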
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_init_time_ms() {
    return get_new_prediction(_concurrent_mark_init_times_ms);
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at yg-age
  // "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

protected:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data);
  void print_par_sizes(int level, const char* str, double* data);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary(PauseSummary* stats) const;

  void print_summary(int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd(int level, const char* str, NumberSeq* seq) const;

  double avg_value(double* data);
  double max_value(double* data);
  double sum_of_values(double* data);
  double max_sum(double* data1, double* data2);

  int _last_satb_drain_processed_buffers;
  int _last_update_rs_processed_buffers;
  double _last_pause_time_ms;

  size_t _bytes_in_collection_set_before_gc;
  size_t _bytes_copied_during_gc;

  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // We track markings.
  int _num_markings;
  double _mark_thread_startup_sec;  // Time at startup of marking thread

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of regions in the collection set. Set from the incrementally
  // built collection set at the start of an evacuation pause.
  size_t _collection_set_size;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions.
  // Used to populate the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,   // We are actively building the collection set
    Inactive  // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of regions in the incrementally built collection set.
  // Used to set _collection_set_size at the start of an evacuation
  // pause.
  size_t _inc_cset_size;

  // Used as the index in the surviving young words structure
  // which tracks the amount of space, for each young region,
  // that survives the pause.
  size_t _inc_cset_young_index;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The number of recorded used bytes in the young regions
  // of the collection set. This is the sum of the used() bytes
  // of retired young regions in the collection set.
  size_t _inc_cset_recorded_young_bytes;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // The predicted bytes to copy for the regions in the collection
  // set (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_predicted_bytes_to_copy;

  // Info about marking.
  int _n_marks;  // Sticky at 2, so we know when we've done at least 2.

  // The number of collection pauses at the end of the last mark.
  size_t _n_pauses_at_mark_end;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The average time in ms per collection pause, averaged over recent pauses.
  double recent_avg_time_for_pauses_ms();

  // The average time in ms for RS scanning, per pause, averaged
  // over recent pauses. (Note the RS scanning time for a pause
  // is itself an average of the RS scanning time for each worker
  // thread.)
  double recent_avg_time_for_rs_scan_ms();

  // The number of "recent" GCs recorded in the number sequences
  int number_of_recent_gcs();

  // The average survival ratio, computed by the total number of bytes
  // surviving / total number of bytes before collection over the last
  // several recent pauses.
  double recent_avg_survival_fraction();
  // The survival fraction of the most recent pause; if there have been no
  // pauses, returns 1.0.
  double last_survival_fraction();

  // Returns a "conservative" estimate of the recent survival rate, i.e.,
  // one that may be higher than "recent_avg_survival_fraction".
  // This is conservative in several ways:
  //   If there have been few pauses, it will assume a potential high
  //     variance, and err on the side of caution.
  //   It puts a lower bound (currently 0.1) on the value it will return.
  //   To try to detect phase changes, if the most recent pause ("latest") has a
  //     higher-than-average ("avg") survival rate, it returns that rate.
  // The "work" version is a utility function; young is restricted to young
  // regions.
  double conservative_avg_survival_fraction_work(double avg,
                                                 double latest);

  // The arguments are the two sequences that keep track of the number of bytes
  //   surviving and the total number of bytes before collection, resp.,
  //   over the last several recent pauses.
  // Returns the survival rate for the category in the most recent pause.
  // If there have been no pauses, returns 1.0.
  double last_survival_fraction_work(TruncatedSeq* surviving,
                                     TruncatedSeq* before);

  // The arguments are the two sequences that keep track of the number of bytes
  //   surviving and the total number of bytes before collection, resp.,
  //   over the last several recent pauses.
  // Returns the average survival ratio over the last several recent pauses.
  // If there have been no pauses, returns 1.0.
  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                           TruncatedSeq* before);

  double conservative_avg_survival_fraction() {
    double avg = recent_avg_survival_fraction();
    double latest = last_survival_fraction();
    return conservative_avg_survival_fraction_work(avg, latest);
  }

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // Number of pauses between concurrent markings.
  size_t _pauses_btwn_concurrent_mark;

  size_t _n_marks_since_last_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
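  // (The *_start_sec fields below presumably hold marking phase start times,
  // in seconds, while the *_time_ms fields record the stop-the-world time,
  // in milliseconds, attributed to marking.)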
  double _cur_mark_stop_world_time_ms;
  double _mark_init_start_sec;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  void calculate_young_list_min_length();
  void calculate_young_list_target_length();
  void calculate_young_list_target_length(size_t rs_lengths);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  void check_prediction_validity();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

protected:

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  // Together these do the base cleanup-recording work.  Subclasses might
  // want to put something between them.
  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                                size_t max_live_bytes);
  void record_concurrent_mark_cleanup_end_work2();

public:

  virtual void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  virtual void record_stop_world_start();

  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);

  // Must currently be called while the world is stopped.
  virtual void record_concurrent_mark_init_start();
  virtual void record_concurrent_mark_init_end();
  void record_concurrent_mark_init_end_pre(double mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  virtual void record_concurrent_mark_remark_start();
  virtual void record_concurrent_mark_remark_end();

  virtual void record_concurrent_mark_cleanup_start();
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_concurrent_mark_cleanup_completed();

  virtual void record_concurrent_pause();
  virtual void record_concurrent_pause_end();

  virtual void record_collection_pause_end();
  void print_heap_transition();

  // Record the fact that a full collection occurred.
  virtual void record_full_collection_start();
  virtual void record_full_collection_end();

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set    = true;
  }

  void record_satb_drain_processed_buffers(int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

  void record_ref_proc_time(double ms) {
    _cur_ref_proc_time_ms = ms;
  }

  void record_ref_enq_time(double ms) {
    _cur_ref_enq_time_ms = ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.
  // The head and number of the collection set are available via access
  // methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle();

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does).
  // If initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // note start of mark thread
  void note_start_of_mark_thread();

  // The marked bytes of region "r" have changed; reclassify its desirability
  // for marking.  Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_target_length = _young_list_target_length;
    if (G1FixedEdenSize) {
      young_list_target_length -= _max_survivor_regions;
    }
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_max_length = _young_list_max_length;
    if (G1FixedEdenSize) {
      young_list_max_length -= _max_survivor_regions;
    }
    return young_list_length < young_list_max_length;
  }

  void update_region_num(bool young);

  bool in_young_gc_mode() {
    return _in_young_gc_mode;
  }
  void set_in_young_gc_mode(bool in_young_gc_mode) {
    _in_young_gc_mode = in_young_gc_mode;
  }

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }

  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }

  //
  // Survivor regions policy.
  //
protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // For reporting purposes.
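  // (Eden and survivor occupancy, and total heap capacity, captured at the
  // start of a collection; presumably consumed by print_heap_transition().)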
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
    if (age < _tenuring_threshold && src_region->is_young()) {
      return GCAllocForSurvived;
    } else {
      return GCAllocForTenured;
    }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void calculate_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void calculate_survivors_policy();

};

// This encapsulates a particular strategy for a g1 Collector.
//
//      Start a concurrent mark when our heap size is n bytes
//            greater than our heap size was at the last concurrent
//            mark.  Where n is a function of the CMSTriggerRatio
//            and the MinHeapFreeRatio.
//
//      Start a g1 collection pause when we have allocated the
//            average number of bytes currently being freed in
//            a collection, but only if it is at least one region
//            full.
//
//      Resize the heap based on desired allocation space, where
//            desired allocation space is a function of survival
//            rate and desired future to size.
//
//      Choose the collection set by first picking all older regions
//            which have a survival rate which beats our projected young
//            survival rate.  Then fill out the number of needed regions
//            with young regions.

class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  CollectionSetChooser* _collectionSetChooser;
  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP