/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP

#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/shared/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;

// TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in old) while TraceOldGenTime collects data about full GCs.
class TraceYoungGenTimeData : public CHeapObj<mtGC> {
 private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

 public:
  TraceYoungGenTimeData() : _young_pause_num(0), _mixed_pause_num(0) {}
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceOldGenTimeData : public CHeapObj<mtGC> {
 private:
  NumberSeq _all_full_gc_times;

 public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (there is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
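//
// For example, a sketch of how the rules above combine (assuming the
// default values G1NewSizePercent=5 and G1MaxNewSizePercent=60):
//   -Xmx8g                   -> young gen adapts between 5% and 60% of the heap
//   -Xmx8g -XX:NewSize=512m  -> young gen adapts between 512m and 60% of the heap
//   -Xmx8g -Xmn1g            -> NewSize==MaxNewSize, so the young gen is fixed at 1g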
class G1YoungGenSizer : public CHeapObj<mtGC> {
 private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in regions
  // given the number of heap regions depending on the kind of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

 public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() const {
    return _adaptive_size;
  }
};

class G1CollectorPolicy: public CollectorPolicy {
 private:
  G1Predictions _predictor;

  double get_new_prediction(TruncatedSeq const* seq) const;

  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceYoungGenTimeData _trace_young_gen_time_data;
  TraceOldGenTimeData   _trace_old_gen_time_data;

  double _stop_world_start;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;
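  // Cost predictions are made from the truncated sequences below, each of
  // which tracks a bounded window of recent samples. A new prediction pads
  // the sequence's decaying average with a confidence-scaled measure of its
  // deviation (see G1Predictions and get_new_prediction() above), so that
  // noisy phases yield conservatively high estimates.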
  enum PredictionConstants {
    TruncatedSeqLength = 10,
    NumPrevPausesForHeuristics = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length() const     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
  uint old_cset_region_length() const      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _rs_lengths_prediction;

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

 public:
  G1Predictions& predictor() { return _predictor; }

  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_young_card_num(size_t rs_length) const;

  size_t predict_non_young_card_num(size_t rs_length) const;

  double predict_rs_scan_time_ms(size_t card_num) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;
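  // A pause's predicted elapsed time is modeled as a base cost (remembered
  // set update for the pending cards plus the constant "other" overhead)
  // plus a per-region cost (RS scan, object copy, region bookkeeping) for
  // each region added to the collection set, using the predictions above.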
  double predict_base_elapsed_time_ms(size_t pending_cards) const;
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards) const;
  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length() const       { return young_cset_region_length() +
                                                 old_cset_region_length(); }
  uint young_cset_region_length() const { return eden_cset_region_length() +
                                                 survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time() const;

  bool should_update_surv_rate_group_predictors() {
    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
  }

  void cset_regions_freed() {
    bool update = should_update_surv_rate_group_predictors();

    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
    _survivor_surv_rate_group->all_surviving_words_recorded(update);
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  const G1MMUTracker* mmu_tracker() const {
    return _mmu_tracker;
  }
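  // The pause time budget for a single GC, in milliseconds. The MMU
  // tracker works in seconds (e.g. -XX:MaxGCPauseMillis=200 gives a
  // max_gc_time() of 0.2), hence the conversion.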
  double max_pause_time_ms() const {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;

  // Returns an estimate of the survival rate of the region at the given
  // young-gen age, using the given survivor rate group.
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;

  double predict_yg_surv_rate(int age) const;

  double accum_yg_surv_rate_pred(int age) const;

 private:
  // Statistics kept per GC stoppage (evacuation pause or full collection).
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_cset_part() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  void update_young_list_max_and_target_length();
  void update_young_list_max_and_target_length(size_t rs_lengths);

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length();
  void update_young_list_target_length(size_t rs_lengths);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length) const;

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length() const;

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, desired_min_length and desired_max_length are the
  // desired min and max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length) const;

  uint bounded_young_list_target_length(size_t rs_lengths) const;

  void update_rs_lengths_prediction();
  void update_rs_lengths_prediction(size_t prediction);

  // Calculate and return chunk size (in number of regions) for parallel
  // concurrent mark cleanup.
  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;
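  // Together the following two bounds determine how many old regions a
  // single mixed GC takes on: the minimum ensures the candidate list is
  // drained within the desired number of mixed pauses, while the maximum
  // caps the time a single pause can spend evacuating old regions.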
  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length() const;

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;

  // Sets up marking if proper conditions are met.
  void maybe_start_marking();

 public:
  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);
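  // Returns true if, given the current heap occupancy and the pending
  // allocation (if any), a concurrent marking cycle should be started.
  // The source string is used for ergonomics logging.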
  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition.
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).

  void print_heap_transition(size_t bytes_before) const;
  void print_heap_transition() const;
  void print_detailed_heap_transition(bool full = false) const;

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() const {
    return _bytes_copied_during_gc;
  }

  size_t collection_set_bytes_used_before() const {
    return _collection_set_bytes_used_before;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  double finalize_young_cset_part(double target_pause_time_ms);
  virtual void finalize_old_cset_part(double time_remaining_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_cset_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

 private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

 public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount() const;

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }
  bool is_young_list_full() const;

  bool can_expand_young_list() const;

  uint young_list_max_length() const {
    return _young_list_max_length;
  }

  bool adaptive_young_list_length() const {
    return _young_gen_sizer->adaptive_young_list_length();
  }

 private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_used_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

 public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  static const uint REGIONS_UNLIMITED = (uint) -1;
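  // The maximum number of regions that may be allocated in the given
  // destination state during a single evacuation pause: survivor
  // allocation is capped by the survivors policy, while there is no
  // limit on promotions to old.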
  uint max_regions(InCSetState dest) const {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
        break;
    }
    // keep some compilers happy
    return 0;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() const {
    return _recorded_survivor_regions;
  }

  void record_age_table(ageTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};

#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP