/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP

#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/shared/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;

// TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in the old generation) while TraceOldGenTime collects data
// about full GCs.
class TraceYoungGenTimeData : public CHeapObj<mtGC> {
 private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

 public:
  TraceYoungGenTimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceOldGenTimeData : public CHeapObj<mtGC> {
 private:
  NumberSeq _all_full_gc_times;

 public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
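//
// For example (illustrative, assuming the default G1NewSizePercent=5 and
// G1MaxNewSizePercent=60): on a 10 GB heap with no young gen options set,
// G1 is free to pick any young gen size between roughly 0.5 GB and 6 GB,
// rounded to whole regions, and these bounds are recomputed on every heap
// resize.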
class G1YoungGenSizer : public CHeapObj<mtGC> {
 private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in
  // regions given the number of heap regions, depending on the kind of
  // sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

 public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};
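
// A minimal usage sketch (illustrative, not the authoritative call sites):
// the policy owns a G1YoungGenSizer and forwards heap resizes to it, e.g.
//
//   _young_gen_sizer->heap_size_changed(new_number_of_regions);
//   uint min_len = _young_gen_sizer->min_desired_young_length();
//   uint max_len = _young_gen_sizer->max_desired_young_length();
//
// after which the policy clamps its young list target length to
// [min_len, max_len].
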
class G1CollectorPolicy: public CollectorPolicy {
 private:
  // Either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise.
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceYoungGenTimeData _trace_young_gen_time_data;
  TraceOldGenTimeData   _trace_old_gen_time_data;

  double _stop_world_start;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
  uint old_cset_region_length()      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;
  double _sigma;

  size_t _rs_lengths_prediction;

  double sigma() { return _sigma; }

  // A function that prevents us from putting too much stock in small sample
  // sets. Returns a number between 1.0 and 2.0 (with the default confidence
  // level), depending on the number of samples. 5 or more samples yields
  // 1.0; fewer scale linearly from 2.0 at 1 sample down to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
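
  // Worked example (illustrative): with the default -XX:G1ConfidencePercent=50,
  // sigma() is 0.5, so a sequence with only 3 samples gets
  //   confidence_factor(3) = 1.0 + 0.5 * (5 - 3) / 2.0 = 1.5
  // i.e. get_new_prediction() below pads the raw average by 50% until the
  // sequence has accumulated at least 5 samples.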

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

 public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  // Combine the running average with the confidence padding: take the
  // larger of "average plus sigma standard deviations" and "average times
  // the small-sample confidence factor".
  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_young_cards_per_entry_ratio() {
    return get_new_prediction(_young_cards_per_entry_ratio_seq);
  }

  double predict_mixed_cards_per_entry_ratio() {
    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
      return predict_young_cards_per_entry_ratio();
    } else {
      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
    }
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (collector_state()->gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
    }
  }

  double predict_mixed_rs_scan_time_ms(size_t card_num) {
    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return (double) (card_num *
                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
    }
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
      return (1.1 * (double) bytes_to_copy) *
              get_new_prediction(_cost_per_byte_ms_seq);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_during_cm_seq);
    }
  }
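
  // Note: the 1.1 multiplier above is a pessimistic fudge factor: until we
  // have at least 3 samples of the copy cost taken during concurrent
  // marking, copying is simply assumed to be ~10% more expensive than the
  // normal (non-marking) per-byte cost.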
  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (collector_state()->during_concurrent_mark()) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_seq);
    }
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length()       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();

  void cset_regions_freed() {
    bool propagate = collector_state()->should_propagate();
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of a region at the given
  // young gen age.
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }
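
  // How these predictors fit together (an illustrative sketch, not the
  // exact code): the predicted elapsed time for a candidate collection
  // set is roughly
  //
  //   predict_base_elapsed_time_ms(pending_cards)
  //     + sum over cset regions hr of predict_region_elapsed_time_ms(hr, ...)
  //
  // and predict_will_fit() (below) grows the young list only while this
  // total stays within the pause time goal.
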
 private:
  // Statistics kept per GC stoppage, pause, or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_cset_part() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of any heap region in the collection set.
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;
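
  // Illustrative flow: a refinement thread re-samples a young region whose
  // RSet grew from 10 to 15 entries and adds +5 to
  // _inc_cset_recorded_rs_lengths_diffs; at the start of the next GC the
  // accumulated diffs are folded into _inc_cset_recorded_rs_lengths and
  // the diffs field is reset.
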
  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, desired_min_length and desired_max_length are the
  // desired min and max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length);

  // Calculate and return chunk size (in number of regions) for parallel
  // concurrent mark cleanup.
  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms);

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes);

 public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  G1CollectorState* collector_state();

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
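
  // A plausible ordering of the record_* callbacks around one young pause
  // (illustrative; the authoritative call sites are in G1CollectedHeap):
  //
  //   record_stop_world_start();          // safepoint about to be requested
  //   record_collection_pause_start(t);   // world stopped, pause begins
  //   ...evacuation work...
  //   record_collection_pause_end(ms, cards_scanned);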
  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition.
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).

  void print_heap_transition(size_t bytes_before);
  void print_heap_transition();
  void print_detailed_heap_transition(bool full = false);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  size_t collection_set_bytes_used_before() const {
    return _collection_set_bytes_used_before;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str);

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  double finalize_young_cset_part(double target_pause_time_ms);
  virtual void finalize_old_cset_part(double time_remaining_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_cset_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
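
  // Illustrative lifecycle of the incremental CSet (assumed from the
  // methods above, not a verbatim trace): start_incremental_cset_building()
  // after a pause -> regions are added/updated while the mutator runs ->
  // finalize_incremental_cset_building() at the start of the next pause ->
  // stop_incremental_cset_building() once the collection set is chosen.
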
 private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

 public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It is best called
  // during a safepoint, when the test of whether a cycle is in progress
  // or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full();

  bool can_expand_young_list();

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

 private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_used_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;
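
  // A sketch of how these interact (assumed from the surrounding code):
  // update_survivors_policy() recomputes _max_survivor_regions from the
  // current young list target and derives _tenuring_threshold from
  // _survivors_age_table; once the survivor space limit is reached, the
  // threshold drops to 0 and further survivors are promoted to old.
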
 public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(InCSetState dest) {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
        break;
    }
    // keep some compilers happy
    return 0;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_age_table(ageTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
//
// Since sum == n * avg, the expression below simplifies to the familiar
//   variance = sum_of_squares / n - avg^2
// i.e. E[x^2] - (E[x])^2.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}

#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP