rev 57223 : imported patch 8225484-changes-to-survivor-calculation
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1POLICY_HPP
#define SHARE_GC_G1_G1POLICY_HPP

#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/shared/gcCause.hpp"
#include "utilities/pair.hpp"

// A G1Policy makes policy decisions that determine the
// characteristics of the collector. Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class G1CollectionSet;
class G1CollectionSetCandidates;
class G1CollectionSetChooser;
class G1IHOPControl;
class G1Analytics;
class G1SurvivorRegions;
class G1YoungGenSizer;
class GCPolicyCounters;
class STWGCTimer;

class G1Policy: public CHeapObj<mtGC> {
private:

  static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
  // Update the IHOP control with necessary statistics.
  void update_ihop_prediction(double mutator_time_s,
                              size_t mutator_alloc_bytes,
                              size_t young_gen_size,
                              bool this_gc_was_young_only);
  void report_ihop_statistics();

  G1Predictions _predictor;
  G1Analytics* _analytics;
  G1RemSetTrackingPolicy _remset_tracker;
  G1MMUTracker* _mmu_tracker;
  G1IHOPControl* _ihop_control;

  GCPolicyCounters* _policy_counters;

  double _full_collection_start_sec;

  jlong _collection_pause_end_millis;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  // The survivor rate group below must be initialized after the predictor because it
  // indirectly uses it through the "this" object passed to its constructor.
  G1SurvRateGroup* _eden_surv_rate_group;

  size_t _survivor_used_bytes_at_start;
  size_t _survivor_used_bytes_at_end;

  double _reserve_factor;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  uint _reserve_regions;

  G1YoungGenSizer* _young_gen_sizer;

  uint _free_regions_at_end_of_collection;

  size_t _rs_length;

  size_t _rs_length_prediction;

  size_t _pending_cards_at_gc_start;
  size_t _pending_cards_at_prev_gc_end;
  size_t _total_mutator_refined_cards;
  size_t _total_concurrent_refined_cards;
  Tickspan _total_concurrent_refinement_time;

  // The amount of allocated bytes in old gen during the last mutator and the following
  // young GC phase.
  size_t _bytes_allocated_in_old_since_last_gc;

  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;

  bool should_update_surv_rate_group_predictors() {
    return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
  }

  double logged_cards_processing_time() const;
public:
  const G1Predictions& predictor() const { return _predictor; }
  const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }

  G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }

  // Add the given number of bytes to the total number of allocated bytes in the old gen.
  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

  void set_region_eden(HeapRegion* hr) {
    hr->set_eden();
    hr->install_surv_rate_group(_eden_surv_rate_group);
  }

  void record_rs_length(size_t rs_length) {
    _rs_length = rs_length;
  }

  double predict_base_elapsed_time_ms(size_t num_pending_cards) const;

private:
  double predict_base_elapsed_time_ms(size_t num_pending_cards, size_t rs_length) const;

  double predict_region_copy_time_ms(HeapRegion* hr) const;

public:

  double predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy = NULL) const;
  double predict_region_non_copy_time_ms(HeapRegion* hr, bool for_young_gc) const;
  double predict_region_total_time_ms(HeapRegion* hr, bool for_young_gc) const;

  void cset_regions_freed() {
    bool update = should_update_surv_rate_group_predictors();

    _eden_surv_rate_group->all_surviving_words_recorded(predictor(), update);
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  const G1MMUTracker* mmu_tracker() const {
    return _mmu_tracker;
  }

  double max_pause_time_ms() const {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

private:
  G1CollectionSet* _collection_set;
  double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
  double other_time_ms(double pause_time_ms) const;

  double young_other_time_ms() const;
  double non_young_other_time_ms() const;
  double constant_other_time_ms(double pause_time_ms) const;

  G1CollectionSetChooser* cset_chooser() const;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1h;

  G1GCPhaseTimes* _phase_times;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Updates the internal young list maximum and target lengths. Returns the
  // unbounded young list target length. If no rs_length parameter is passed,
  // predict the RS length using the prediction model, otherwise use the
  // given rs_length as the prediction.
  uint update_young_list_max_and_target_length();
  uint update_young_list_max_and_target_length(size_t rs_length);

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model.
  // Returns the unbounded young list target length.
  uint update_young_list_target_length(size_t rs_length);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length) const;

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length() const;

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_length
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, desired_min_length and desired_max_length are the
  // desired min and max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_length,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length) const;

  // Result of the young_list_target_lengths() method, containing both the
  // bounded as well as the unbounded young list target lengths in this order.
  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
  YoungTargetLengths young_list_target_lengths(size_t rs_length) const;

  void update_rs_length_prediction();
  void update_rs_length_prediction(size_t prediction);

  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_survivor_regions_evac_time() const;

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;

public:
  size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; }

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length() const;

  // Returns the given amount of reclaimable bytes (that represents
  // the amount of reclaimable space still to be collected) as a
  // percentage of the current heap capacity.
  double reclaimable_bytes_percent(size_t reclaimable_bytes) const;

  jlong collection_pause_end_millis() { return _collection_pause_end_millis; }

private:
  void clear_collection_set_candidates();
  // Sets up marking if proper conditions are met.
  void maybe_start_marking();

  // The kind of STW pause.
  enum PauseKind {
    FullGC,
    YoungOnlyGC,
    MixedGC,
    LastYoungGC,
    InitialMarkGC,
    Cleanup,
    Remark
  };

  // Calculate PauseKind from internal state.
  PauseKind young_gc_pause_kind() const;
  // Record the given STW pause with the given start and end times (in s).
  void record_pause(PauseKind kind, double start, double end);
  // Indicate that we aborted marking before doing any mixed GCs.
  void abort_time_to_mixed_tracking();

  void record_concurrent_refinement_data(bool is_full_collection);

public:

  G1Policy(STWGCTimer* gc_timer);

  virtual ~G1Policy();

  static G1Policy* create_policy(STWGCTimer* gc_timer_stw);

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet length and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary(size_t rs_length);

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);

  void note_gc_start();

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  bool about_to_start_mixed_phase() const;

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  virtual void record_collection_pause_end(double pause_time_ms);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  virtual void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();

  void print_phases();

  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;

  // Calculate and return the number of initial and optional old gen regions from
  // the given collection set candidates and the remaining time.
  void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
                                            double time_remaining_ms,
                                            uint& num_initial_regions,
                                            uint& num_optional_regions);

  // Calculate the number of optional regions from the given collection set candidates,
  // the remaining time and the maximum number of these regions and return the number
  // of actually selected regions in num_optional_regions.
  void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
                                                 uint const max_optional_regions,
                                                 double time_remaining_ms,
                                                 uint& num_optional_regions);

private:
  // Set the state to start a concurrent marking cycle and clear
  // _initiate_conc_mark_if_possible because it has now been
  // acted on.
  void initiate_conc_mark();

public:
  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set in_initial_mark_gc() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool should_allocate_mutator_region() const;

  bool can_expand_young_list() const;

  uint young_list_max_length() const {
    return _young_list_max_length;
  }

  bool use_adaptive_young_list_length() const;

  void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  AgeTable _survivors_age_table;

  size_t _surviving_survivor_words;

  size_t desired_survivor_size(uint max_regions) const;

  // Fraction used when predicting how many optional regions to include in
  // the CSet. This fraction of the available time is used for optional regions,
  // the rest is used to add old regions to the normal CSet.
  double optional_prediction_fraction() { return 0.2; }

public:
  // Fraction used when evacuating the optional regions. This fraction of the
  // remaining time is used to choose what regions to include in the evacuation.
  double optional_evacuation_fraction() { return 0.75; }

  uint tenuring_threshold() const { return _tenuring_threshold; }

  uint max_survivor_regions() {
    return _max_survivor_regions;
  }

  void record_age_table(AgeTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void record_surviving_survivor_words(size_t words) {
    _surviving_survivor_words += words;
  }

  void print_age_table();

  void update_max_gc_locker_expansion();

  void update_survivors_policy();

  virtual bool force_upgrade_to_full() {
    return false;
  }
};

#endif // SHARE_GC_G1_G1POLICY_HPP