/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;

// TraceGen0Time collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in Gen1) while TraceGen1Time collects data about full GCs.
class TraceGen0TimeData : public CHeapObj<mtGC> {
private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

public:
  TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {}

  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceGen1TimeData : public CHeapObj<mtGC> {
private:
  NumberSeq _all_full_gc_times;

public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (there is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size.
// Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length
  // in regions given the number of heap regions, depending on the kind
  // of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions,
                                        uint* min_young_length,
                                        uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};

class G1CollectorPolicy: public CollectorPolicy {
private:
  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
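  // (A TruncatedSeq -- see utilities/numberSeq.hpp -- keeps only the
  // most recent samples, so these sequences track recent behavior
  // rather than the whole run.)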
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceGen0TimeData _trace_gen0_time_data;
  TraceGen1TimeData _trace_gen1_time_data;

  double _stop_world_start;

  // indicates whether we are in young or mixed GC mode
  bool _gcs_are_young;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  bool _last_gc_was_young;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
  uint old_cset_region_length()      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;
  double _sigma;

  size_t _rs_lengths_prediction;

  double sigma() { return _sigma; }

  // A function that prevents us putting too much stock in small sample
  // sets. Returns a number between 2.0 and 1.0, depending on the number
  // of samples. 5 or more samples yields 1.0; fewer scales linearly from
  // 2.0 at 1 sample to 1.0 at 5.
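  // For example, with the default G1ConfidencePercent of 50 (so
  // sigma() == 0.5), 3 samples give 1.0 + 0.5 * (5 - 3) / 2.0 = 1.5.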
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_young();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_young_cards_per_entry_ratio() {
    return get_new_prediction(_young_cards_per_entry_ratio_seq);
  }

  double predict_mixed_cards_per_entry_ratio() {
    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
      return predict_young_cards_per_entry_ratio();
    } else {
      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
    }
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
    }
  }

  double predict_mixed_rs_scan_time_ms(size_t card_num) {
    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return (double) (card_num *
                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
    }
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
      return (1.1 * (double) bytes_to_copy) *
              get_new_prediction(_cost_per_byte_ms_seq);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_during_cm_seq);
    }
  }
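
  // Object copy is typically more expensive while concurrent marking
  // is running, so the during-marking cost sequence above is used
  // whenever a marking window is in effect (except for the window
  // opened by this pause's own initial mark).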
  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_seq);
    }
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length()       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();

  void cset_regions_freed() {
    bool propagate = _last_gc_was_young && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at yg-age
  // "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

private:
  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_cset() when adding old regions
  // (if any) to the collection set.
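  // Together with _bytes_copied_during_gc below it also indicates how
  // much of the collection set was actually freed by the pause.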
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap).
  // If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _last_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, desired_min_length and desired_max_length are the
  // desired min and max young list lengths according to the user's
  // inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms);

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
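  // (Effectively 100.0 * reclaimable_bytes / _g1->capacity(); the
  // computation itself lives in g1CollectorPolicy.cpp.)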
  double reclaimable_bytes_perc(size_t reclaimable_bytes);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition().
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).
  void print_heap_transition();
  void print_detailed_heap_transition(bool full = false);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str);

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
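  // target_pause_time_ms is the pause-time budget that the predicted
  // evacuation time of the chosen set should stay within.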
  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
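  // (If marking cannot start yet, the flag simply stays set and the
  // decision is re-evaluated at the start of the next pause.)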
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_used_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
    if (age < _tenuring_threshold && src_region->is_young()) {
      return GCAllocForSurvived;
    } else {
      return GCAllocForTenured;
    }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
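  // For survivor space this drops the tenuring threshold to zero, so
  // objects that would have been copied to survivor regions are
  // tenured for the remainder of the pause.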
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence (i.e. sum_of_squares / n - (sum / n)^2, expanded here in a form
// that reuses the average).
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP