/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;

class TraceGen0TimeData : public CHeapObj {
 private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(int level, const char* str, const NumberSeq* seq) const;
  void print_summary_sd(int level, const char* str, const NumberSeq* seq) const;

 public:
  TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double total_ms,
                             double other_ms,
                             double root_region_scan_wait_ms,
                             double parallel_ms,
                             double ext_root_scan_ms,
                             double satb_filtering_ms,
                             double update_rs_ms,
                             double scan_rs_ms,
                             double obj_copy_ms,
                             double termination_ms,
                             double parallel_other_ms,
                             double clear_ct_ms);
  void increment_collection_count(bool mixed);
  void print() const;
};

class TraceGen1TimeData : public CHeapObj {
 private:
  NumberSeq _all_full_gc_times;

 public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between
// G1DefaultMinNewGenPercent and G1DefaultMaxNewGenPercent of the
// heap size. This means that every time the heap size changes the
// limits for the young gen size will be updated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1DefaultMaxNewGenPercent
// of the heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1DefaultMinNewGenPercent
// of the heap as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collections.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer : public CHeapObj {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

public:
  G1YoungGenSizer();
  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};
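
// Editor's note: a minimal illustrative sketch (not part of the VM sources)
// of the byte-size-to-region rounding mentioned above. The helper name and
// the example numbers are hypothetical; HeapRegion::GrainBytes is the region
// size in bytes.
#if 0
static uint byte_size_to_region_count(size_t byte_size) {
  // e.g. -XX:NewSize=512m with 1 MB regions yields a 512 region minimum;
  // sizes that are not a multiple of the region size are rounded down,
  // but never below one region.
  return (uint) MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
}
#endif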

class G1CollectorPolicy: public CollectorPolicy {
private:
  // Either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise.
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  CollectionSetChooser* _collectionSetChooser;

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  uint   _cur_collection_pause_used_regions_at_start;
  double _cur_collection_par_time_ms;

  double _cur_collection_code_root_fixup_time_ms;

  double _cur_clear_ct_time_ms;
  double _cur_ref_proc_time_ms;
  double _cur_ref_enq_time_ms;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;         // min
  double _max_clear_cc_time_ms;         // max
  double _cur_clear_cc_time_ms;         // clearing time during current pause
  double _cum_clear_cc_time_ms;         // cumulative clearing time
  jlong  _num_cc_clears;                // number of times the card count cache has been cleared
#endif

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceGen0TimeData _trace_gen0_time_data;
  TraceGen1TimeData _trace_gen1_time_data;

  double _stop_world_start;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_satb_filtering_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;
  double* _par_last_gc_worker_times_ms;

  // Each worker's 'other' time, i.e. the elapsed time of the parallel
  // code executed by a worker minus the sum of the individual sub-phase
  // times for that worker thread.
  double* _par_last_gc_worker_other_times_ms;

  // Indicates whether we are in young or mixed GC mode.
  bool _gcs_are_young;

  uint _young_list_target_length;
  uint _young_list_fixed_length;
  size_t _prev_eden_capacity; // used for logging

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  uint _young_list_max_length;

  bool _last_gc_was_young;

  unsigned _young_pause_num;
  unsigned _mixed_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
  uint old_cset_region_length()      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;

  size_t _rs_lengths_prediction;

  double sigma() { return _sigma; }

  // A function that prevents us putting too much stock in small sample
  // sets. Returns a number between 2.0 and 1.0, depending on the number
  // of samples. 5 or more samples yields one; fewer scales linearly from
  // 2.0 at 1 sample to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }
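
  // Editor's note: an illustrative, hypothetical calculation (not part of the
  // VM sources) showing how the average, standard deviation, sample count and
  // sigma feed the predictions. get_new_prediction() is defined in the public
  // section further down; the numbers here are made up.
#if 0
  // Suppose a sequence has avg = 10.0 ms, sd = 2.0 ms, only 2 samples,
  // and sigma() = 0.5:
  double padded    = 10.0 + 0.5 * 2.0;                     // 11.0 ms
  double factored  = 10.0 * (1.0 + 0.5 * (5 - 2) / 2.0);   // 17.5 ms
  double predicted = MAX2(padded, factored);               // 17.5 ms
  // i.e. with few samples the confidence factor dominates and the
  // prediction is deliberately pessimistic.
#endif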

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_young();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001) {
      return 0;
    } else {
      return (size_t) prediction;
    }
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num) {
      prediction = max_pending_card_num;
    } else {
      prediction = max_pending_card_num - diff;
    }

    return prediction;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_young_cards_per_entry_ratio() {
    return get_new_prediction(_young_cards_per_entry_ratio_seq);
  }

  double predict_mixed_cards_per_entry_ratio() {
    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
      return predict_young_cards_per_entry_ratio();
    } else {
      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
    }
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
    }
  }

  double predict_mixed_rs_scan_time_ms(size_t card_num) {
    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return (double) (card_num *
                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
    }
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
      return (1.1 * (double) bytes_to_copy) *
              get_new_prediction(_cost_per_byte_ms_seq);
    } else {
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
    }
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_seq);
    }
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
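
  // Editor's note: an illustrative sketch of how the predictors above are
  // typically combined into a per-region estimate. This is an assumption
  // about the out-of-line definition in g1CollectorPolicy.cpp, not a copy of
  // it; the "_sketch" name is hypothetical.
#if 0
  double predict_region_elapsed_time_ms_sketch(HeapRegion* hr, bool young) {
    size_t rs_length = 0;  // in reality taken from the region's remembered set
    size_t card_num  = gcs_are_young() ? predict_young_card_num(rs_length)
                                       : predict_non_young_card_num(rs_length);
    double ms = predict_rs_scan_time_ms(card_num) +
                predict_object_copy_time_ms(predict_bytes_to_copy(hr));
    // plus the fixed per-region "other" overhead
    return ms + (young ? predict_young_other_time_ms(1)
                       : predict_non_young_other_time_ms(1));
  }
#endif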

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length()       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_survivor_regions_evac_time();

  void cset_regions_freed() {
    bool propagate = _last_gc_was_young && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

private:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, double value, int workers);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data, bool showDecimals = true);

  double avg_value(double* data);
  double max_value(double* data);
  double sum_of_values(double* data);
  double max_sum(double* data1, double* data2);

  double _last_pause_time_ms;

  size_t _bytes_in_collection_set_before_gc;
  size_t _bytes_copied_during_gc;

  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, whether a pause or a full collection.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of a heap region in the collection set.
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add them to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;
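
  // Editor's note: an illustrative sketch of the lock-free pattern the two
  // fields above describe. It is an assumption about the matching code in
  // g1CollectorPolicy.cpp, not a copy of it, and the local names are
  // hypothetical.
#if 0
  // Concurrent refinement thread, while re-sampling a young region's RSet:
  _inc_cset_recorded_rs_lengths_diffs += new_rs_length - old_rs_length;

  // At the start of an evacuation pause (single-threaded, at a safepoint),
  // the accumulated delta is folded back in and reset:
  _inc_cset_recorded_rs_lengths      += _inc_cset_recorded_rs_lengths_diffs;
  _inc_cset_recorded_rs_lengths_diffs = 0;
#endif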

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _last_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _root_region_scan_wait_time_ms;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal.
  // The parameters are: rs_lengths is the prediction of how large the
  // young RSet lengths will be, base_min_length is the number of regions
  // already in the young list, and desired_min_length and desired_max_length
  // are the desired min and max young list lengths according to the user's
  // inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms);

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  void record_stop_world_start();

  void record_collection_pause_start(double start_time_sec, size_t start_used);

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  void record_root_region_scan_wait_time(double time_ms) {
    _root_region_scan_wait_time_ms = time_ms;
  }

  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
  void record_concurrent_mark_cleanup_completed();

  void record_concurrent_pause();
  void record_concurrent_pause_end();

  void record_collection_pause_end(int no_of_gc_threads);
  void print_heap_transition();
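
  // Editor's note: a hypothetical sketch of the order in which the caller
  // (G1CollectedHeap) drives the hooks above and below during an evacuation
  // pause. It is inferred from the names and comments in this file, not
  // copied from the implementation; the local names are made up.
#if 0
  policy->record_stop_world_start();                 // when the pause is requested
  policy->record_collection_pause_start(start_sec, used_bytes_at_start);
  // ... GC workers run; the per-worker record_*_time() setters below
  //     fill in the individual sub-phase timings ...
  policy->record_collection_pause_end(active_gc_threads);
  policy->print_heap_transition();
#endif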

  // Record the fact that a full collection occurred.
  void record_full_collection_start();
  void record_full_collection_end();

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_satb_filtering_time(int worker_i, double ms) {
    _par_last_satb_filtering_times_ms[worker_i] = ms;
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_code_root_fixup_time(double ms) {
    _cur_collection_code_root_fixup_time_ms = ms;
  }

  void record_ref_proc_time(double ms) {
    _cur_ref_proc_time_ms = ms;
  }

  void record_ref_enq_time(double ms) {
    _cur_ref_enq_time_ms = ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str);

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  void finalize_cset(double target_pause_time_ms);
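
  // Editor's note: a rough sketch of the budgeting idea behind finalize_cset()
  // for mixed GCs. This is an assumption based on the comments and the
  // predictors in this file, not a copy of the implementation; the local
  // names are hypothetical.
#if 0
  double time_remaining_ms = target_pause_time_ms - predicted_base_time_ms;
  while (next_old_candidate != NULL) {
    double region_ms = predict_region_elapsed_time_ms(next_old_candidate, false);
    if (region_ms > time_remaining_ms) {
      break;                                   // pause budget exhausted
    }
    add_old_region_to_cset(next_old_candidate);
    time_remaining_ms -= region_ms;
    next_old_candidate = NULL /* next region offered by the CollectionSetChooser */;
  }
#endif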

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();
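
  // Editor's note: a rough paraphrase (not the real method body) of how the
  // two flags cooperate, as described in the comments above.
#if 0
  if (initiate_conc_mark_if_possible()) {
    if (/* concurrent marking from the previous cycle has fully finished */ true) {
      set_during_initial_mark_pause();         // this pause does the initial-mark work
      clear_initiate_conc_mark_if_possible();
    }
    // otherwise the suggestion stays set and is re-examined at the next pause
  }
#endif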

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
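
// Editor's note: an illustrative check (not part of the VM sources) that the
// expression above is the usual "mean of squares minus square of the mean"
// form of the variance, rearranged to need only the running sum and
// sum-of-squares.
#if 0
  // (sum_of_squares - 2*avg*sum + n*avg^2) / n
  //   = sum_of_squares/n - 2*avg*(sum/n) + avg^2
  //   = sum_of_squares/n - avg^2
  // For the samples {1, 2, 3}: sum = 6, sum_of_squares = 14, avg = 2, so
  double v = variance(3, 14.0, 6.0);   // == 14.0/3 - 4.0 == 2.0/3
#endif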

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP