/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP

#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "utilities/pair.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
// * choice of collection set.
// * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1IHOPControl;

// TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in old) while TraceOldGenTime collects data about full GCs.
class TraceYoungGenTimeData : public CHeapObj<mtGC> {
private:
  unsigned _young_pause_num;
  unsigned _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

public:
  TraceYoungGenTimeData() : _young_pause_num(0), _mixed_pause_num(0) {}
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};
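// Typical call sequence over one evacuation pause, as an illustrative
// sketch only (the policy owns an instance of this class and drives it
// from the record_* methods declared further below):
//
//   _trace_young_gen_time_data.record_start_collection(time_to_stop_the_world_ms);
//   // ... evacuation pause runs ...
//   _trace_young_gen_time_data.increment_young_collection_count();
//   _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times);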
class TraceOldGenTimeData : public CHeapObj<mtGC> {
private:
  NumberSeq _all_full_gc_times;

public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in regions
  // given the number of heap regions depending on the kind of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() const {
    return _adaptive_size;
  }
};
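// An illustrative reading of how the command line selects a SizerKind,
// based on the enum names and the rules documented above (the flag
// values are example placeholders):
//
//   (no young gen flags)                 -> SizerDefaults
//   -XX:NewSize=256m                     -> SizerNewSizeOnly
//   -XX:MaxNewSize=1g                    -> SizerMaxNewSizeOnly
//   -XX:NewSize=256m -XX:MaxNewSize=1g   -> SizerMaxAndNewSize
//   -XX:NewRatio=3                       -> SizerNewRatio
//
// The "fixed" cases (NewSize==MaxNewSize, or NewRatio) presumably clear
// _adaptive_size, so adaptive_young_list_length() returns false.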
class G1CollectorPolicy: public CollectorPolicy {
private:
  G1IHOPControl* _ihop_control;

  G1IHOPControl* create_ihop_control() const;
  // Update the IHOP control with necessary statistics.
  void update_ihop_prediction(double mutator_time_s,
                              size_t mutator_alloc_bytes,
                              size_t young_gen_size);
  void report_ihop_statistics();

  G1Predictions _predictor;

  double get_new_prediction(TruncatedSeq const* seq) const;
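  // Predictions are drawn from decaying-average sequences. A minimal
  // sketch of the idea, assuming TruncatedSeq's davg()/dsd() accessors
  // and a confidence multiplier owned by G1Predictions (the real formula
  // lives in g1Predictions.hpp and may differ in detail):
  //
  //   double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
  //     // Decayed average, padded by a multiple of the decayed standard
  //     // deviation so that estimates err on the pessimistic side.
  //     return seq->davg() + _predictor.sigma() * seq->dsd();
  //   }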
  // Either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise.
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceYoungGenTimeData _trace_young_gen_time_data;
  TraceOldGenTimeData   _trace_old_gen_time_data;

  double _stop_world_start;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  enum PredictionConstants {
    TruncatedSeqLength = 10,
    NumPrevPausesForHeuristics = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     const { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
  uint old_cset_region_length()      const { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _rs_lengths_prediction;

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

  // The number of bytes allocated in the old gen during the last mutator
  // phase and the following young GC pause.
  size_t _bytes_allocated_in_old_since_last_gc;

  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
  const G1Predictions& predictor() const { return _predictor; }

  // Add the given number of bytes to the total number of allocated bytes in the old gen.
  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_young_card_num(size_t rs_length) const;

  size_t predict_non_young_card_num(size_t rs_length) const;

  double predict_rs_scan_time_ms(size_t card_num) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;

  double predict_base_elapsed_time_ms(size_t pending_cards) const;
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards) const;
  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
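  // The predictions above combine into per-region cost estimates. A rough
  // composition, for illustration only (predict_region_elapsed_time_ms()
  // implements the real calculation, including the young/mixed split):
  //
  //   size_t cards     = predict_young_card_num(rs_length);
  //   double scan_ms   = predict_rs_scan_time_ms(cards);
  //   double copy_ms   = predict_object_copy_time_ms(predict_bytes_to_copy(hr));
  //   double other_ms  = predict_young_other_time_ms(1); // per-region overhead
  //   double region_ms = scan_ms + copy_ms + other_ms;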
  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length() const       { return young_cset_region_length() +
                                                 old_cset_region_length(); }
  uint young_cset_region_length() const { return eden_cset_region_length() +
                                                 survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time() const;

  bool should_update_surv_rate_group_predictors() {
    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
  }

  void cset_regions_freed() {
    bool update = should_update_surv_rate_group_predictors();

    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
    _survivor_surv_rate_group->all_surviving_words_recorded(update);
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  const G1MMUTracker* mmu_tracker() const {
    return _mmu_tracker;
  }

  double max_pause_time_ms() const {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;

  double predict_yg_surv_rate(int age) const;

  double accum_yg_surv_rate_pred(int age) const;

protected:
  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
  virtual double other_time_ms(double pause_time_ms) const;

  double young_other_time_ms() const;
  double non_young_other_time_ms() const;
  double constant_other_time_ms(double pause_time_ms) const;

private:
  // Statistics kept per GC stoppage (evacuation pause or full GC).
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_cset_part() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,   // We are actively building the collection set
    Inactive  // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of any heap region in the collection set.
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;
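  // A minimal sketch of the accumulate-then-fold pattern used by the
  // *_diffs fields (illustrative only; the helper names below are
  // hypothetical, and the real folding is done in
  // finalize_incremental_cset_building()):
  //
  //   // Refinement thread: record only the delta; a single writer means
  //   // no synchronization is needed.
  //   void on_sample(size_t old_rs_length, size_t new_rs_length) {
  //     _inc_cset_recorded_rs_lengths_diffs +=
  //         (ssize_t)new_rs_length - (ssize_t)old_rs_length;
  //   }
  //
  //   // At the start of a GC (at a safepoint), fold the delta in.
  //   void fold_diffs() {
  //     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  //     _inc_cset_recorded_rs_lengths_diffs = 0;
  //   }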
  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Updates the internal young list maximum and target lengths. Returns the
  // unbounded young list target length.
  uint update_young_list_max_and_target_length();
  uint update_young_list_max_and_target_length(size_t rs_lengths);

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  // Returns the unbounded young list target length.
  uint update_young_list_target_length(size_t rs_lengths);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length) const;

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length() const;

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, and desired_min_length and desired_max_length are
  // the desired min and max young list lengths according to the user's
  // inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length) const;

  // Result of the young_list_target_lengths() method, containing both the
  // bounded as well as the unbounded young list target lengths in this order.
  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;

  void update_rs_lengths_prediction();
  void update_rs_lengths_prediction(size_t prediction);

  // Calculate and return the chunk size (in number of regions) for parallel
  // concurrent mark cleanup.
  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;
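  // calculate_young_list_target_length() effectively searches for the
  // largest young length that predict_will_fit() still accepts. The idea,
  // as a simplified linear sketch (the real code refines this, e.g. by
  // binary-searching over the same predicate):
  //
  //   uint young_length = desired_min_length;
  //   while (young_length < desired_max_length &&
  //          predict_will_fit(young_length + 1, base_time_ms,
  //                           base_free_regions, target_pause_time_ms)) {
  //     young_length++;
  //   }
  //   return young_length;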
  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length() const;

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
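  // A one-line sketch of the percentage computation, assuming the heap's
  // current capacity in bytes (the actual implementation lives in
  // g1CollectorPolicy.cpp):
  //
  //   double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  //     return (double) reclaimable_bytes * 100.0 / _g1->capacity();
  //   }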
  // Sets up marking if proper conditions are met.
  void maybe_start_marking();

  // The kind of STW pause.
  enum PauseKind {
    FullGC,
    YoungOnlyGC,
    MixedGC,
    LastYoungGC,
    InitialMarkGC,
    Cleanup,
    Remark
  };

  // Calculate PauseKind from internal state.
  PauseKind young_gc_pause_kind() const;
  // Record the given STW pause with the given start and end times (in s).
  void record_pause(PauseKind kind, double start, double end);
  // Indicate that we aborted marking before doing any mixed GCs.
  void abort_time_to_mixed_tracking();
public:

  G1CollectorPolicy();

  virtual ~G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  virtual void note_gc_start(uint num_active_workers);

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  bool about_to_start_mixed_phase() const;

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition().
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).
  void print_heap_transition(size_t bytes_before) const;
  void print_heap_transition() const;
  void print_detailed_heap_transition(bool full = false) const;

  virtual void print_phases(double pause_time_sec);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() const {
    return _bytes_copied_during_gc;
  }

  size_t collection_set_bytes_used_before() const {
    return _collection_set_bytes_used_before;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  double finalize_young_cset_part(double target_pause_time_ms);
  virtual void finalize_old_cset_part(double time_remaining_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_cset_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();
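  // A minimal sketch of the hand-off described above (illustrative only;
  // the collector-state setter name here is hypothetical):
  //
  //   void G1CollectorPolicy::decide_on_conc_mark_initiation() {
  //     if (collector_state()->initiate_conc_mark_if_possible()) {
  //       // Start a cycle only if the previous one has fully completed.
  //       collector_state()->set_during_initial_mark_pause(true);
  //     }
  //   }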
  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount() const;

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full() const;

  bool can_expand_young_list() const;

  uint young_list_max_length() const {
    return _young_list_max_length;
  }

  bool adaptive_young_list_length() const {
    return _young_gen_sizer->adaptive_young_list_length();
  }

  virtual bool should_process_references() const {
    return true;
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_used_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;      // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;  // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;      // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc; // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;  // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;  // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;
public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(InCSetState dest) const {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
        break;
    }
    // keep some compilers happy
    return 0;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() const {
    return _recorded_survivor_regions;
  }

  void record_age_table(ageTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};

#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP