67 G1Analytics* _analytics;
68 G1RemSetTrackingPolicy _remset_tracker;
69 G1MMUTracker* _mmu_tracker;
70 G1IHOPControl* _ihop_control;
71
72 GCPolicyCounters* _policy_counters;
73
// NOTE(review): presumably recorded when a full collection starts, to
// compute its duration - confirm against the corresponding .cpp.
74 double _full_collection_start_sec;
75
// End time of the last collection pause, in milliseconds (per the name;
// the writer is not visible in this chunk).
76 jlong _collection_pause_end_millis;
77
78 uint _young_list_target_length;
79 uint _young_list_fixed_length;
80
81 // The max number of regions we can extend the eden by while the GC
82 // locker is active. This should be >= _young_list_target_length.
83 uint _young_list_max_length;
84
85 // SurvRateGroups below must be initialized after the predictor because they
86 // indirectly use it through this object passed to their constructor.
87 SurvRateGroup* _short_lived_surv_rate_group;
88 SurvRateGroup* _survivor_surv_rate_group;
89
90 double _reserve_factor;
91 // This will be set when the heap is expanded
92 // for the first time during initialization.
93 uint _reserve_regions;
94
95 G1YoungGenSizer* _young_gen_sizer;
96
97 uint _free_regions_at_end_of_collection;
98
// Most recent remembered set length recorded via record_rs_length().
99 size_t _rs_length;
100
// Predicted remembered set length, updated by update_rs_length_prediction().
101 size_t _rs_length_prediction;
102
103 size_t _pending_cards_at_gc_start;
104 size_t _pending_cards_at_prev_gc_end;
105 size_t _total_mutator_refined_cards;
106 size_t _total_concurrent_refined_cards;
107 Tickspan _total_concurrent_refinement_time;
// Running total maintained by add_bytes_allocated_in_old_since_last_gc().
111 size_t _bytes_allocated_in_old_since_last_gc;
112
113 G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
114
// Survivor rate predictors are refreshed only during the young-only phase,
// and never while marking or remembered set rebuilding is in progress.
115 bool should_update_surv_rate_group_predictors() {
116 return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
117 }
118
119 double logged_cards_processing_time() const;
120 public:
// Read-only access to the shared predictor (also used indirectly by the
// survivor rate groups - see the comment above their declarations).
121 const G1Predictions& predictor() const { return _predictor; }
// Read-only view of the GC analytics. No cast is needed: converting
// G1Analytics* to const G1Analytics* is an implicit qualification
// conversion, so the previous const_cast was redundant.
122 const G1Analytics* analytics() const { return _analytics; }
123
124 G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }
125
126 // Add the given number of bytes to the total number of allocated bytes in the old gen.
127 void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
128
// Tag hr as an eden region and attach the short-lived (eden) survivor
// rate group to it.
129 void set_region_eden(HeapRegion* hr) {
130 hr->set_eden();
131 hr->install_surv_rate_group(_short_lived_surv_rate_group);
132 }
133
// Attach the survivor rate group to hr; unlike set_region_eden() the
// region must already be tagged as a survivor (asserted below).
134 void set_region_survivor(HeapRegion* hr) {
135 assert(hr->is_survivor(), "pre-condition");
136 hr->install_surv_rate_group(_survivor_surv_rate_group);
137 }
138
// Record the measured remembered set length for the current pause.
139 void record_rs_length(size_t rs_length) {
140 _rs_length = rs_length;
141 }
142
// Predict the "base" (region-independent) pause time from the number of
// pending cards; the two-argument overload additionally accounts for the
// given remembered set length.
143 double predict_base_elapsed_time_ms(size_t num_pending_cards) const;
144 double predict_base_elapsed_time_ms(size_t num_pending_cards,
145 size_t rs_length) const;
146 size_t predict_bytes_to_copy(HeapRegion* hr) const;
147 double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
148
149 double predict_survivor_regions_evac_time() const;
150
// Called when collection set regions have been freed: fold the recorded
// surviving words into the eden and survivor rate groups. Predictors are
// refreshed only when should_update_surv_rate_group_predictors() allows it.
151 void cset_regions_freed() {
152 bool update = should_update_surv_rate_group_predictors();
153
154 _short_lived_surv_rate_group->all_surviving_words_recorded(predictor(), update);
155 _survivor_surv_rate_group->all_surviving_words_recorded(predictor(), update);
156 }
157
158 G1MMUTracker* mmu_tracker() {
159 return _mmu_tracker;
160 }
161
162 const G1MMUTracker* mmu_tracker() const {
163 return _mmu_tracker;
164 }
165
// Pause time goal in milliseconds; max_gc_time() appears to be in seconds
// given the * 1000.0 conversion - confirm against G1MMUTracker.
166 double max_pause_time_ms() const {
167 return _mmu_tracker->max_gc_time() * 1000.0;
168 }
169
170 double accum_yg_surv_rate_pred(int age) const;
171
172 private:
173 G1CollectionSet* _collection_set;
// Helpers breaking a pause time into components; NOTE(review): presumably
// derived from _phase_times - confirm in the .cpp.
174 double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
175 double other_time_ms(double pause_time_ms) const;
176
177 double young_other_time_ms() const;
178 double non_young_other_time_ms() const;
179 double constant_other_time_ms(double pause_time_ms) const;
180
181 G1CollectionSetChooser* cset_chooser() const;
182
183 // Stash a pointer to the g1 heap.
184 G1CollectedHeap* _g1h;
185
186 G1GCPhaseTimes* _phase_times;
187
188 // This set of variables tracks the collector efficiency, in order to
189 // determine whether we should initiate a new marking.
190 double _mark_remark_start_sec;
191 double _mark_cleanup_start_sec;
214 uint calculate_young_list_desired_max_length() const;
215
216 // Calculate and return the maximum young list target length that
217 // can fit into the pause time goal. The parameters are: rs_length
218 // represents the prediction of how large the young RSet lengths will
219 // be, base_min_length is the already existing number of regions in
220 // the young list, desired_min_length and desired_max_length are the
221 // desired min and max young list length according to the user's inputs.
222 uint calculate_young_list_target_length(size_t rs_length,
223 uint base_min_length,
224 uint desired_min_length,
225 uint desired_max_length) const;
226
227 // Result of the young_list_target_lengths() method below, containing both
228 // the bounded as well as the unbounded young list target lengths in this order.
229 typedef Pair<uint, uint, StackObj> YoungTargetLengths;
230 YoungTargetLengths young_list_target_lengths(size_t rs_length) const;
231
232 void update_rs_length_prediction();
233 void update_rs_length_prediction(size_t prediction);
234
235 // Check whether a given young length (young_length) fits into the
236 // given target pause time and whether the prediction for the amount
237 // of objects to be copied for the given length will fit into the
238 // given free space (expressed by base_free_regions). It is used by
239 // calculate_young_list_target_length().
240 bool predict_will_fit(uint young_length, double base_time_ms,
241 uint base_free_regions, double target_pause_time_ms) const;
242
243 public:
// Number of pending cards recorded at the start of the current GC.
244 size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; }
245
246 // Calculate the minimum number of old regions we'll add to the CSet
247 // during a mixed GC.
248 uint calc_min_old_cset_length() const;
249
250 // Calculate the maximum number of old regions we'll add to the CSet
251 // during a mixed GC.
252 uint calc_max_old_cset_length() const;
253
|
67 G1Analytics* _analytics;
68 G1RemSetTrackingPolicy _remset_tracker;
69 G1MMUTracker* _mmu_tracker;
70 G1IHOPControl* _ihop_control;
71
72 GCPolicyCounters* _policy_counters;
73
// NOTE(review): presumably recorded when a full collection starts, to
// compute its duration - confirm against the corresponding .cpp.
74 double _full_collection_start_sec;
75
// End time of the last collection pause, in milliseconds (per the name;
// the writer is not visible in this chunk).
76 jlong _collection_pause_end_millis;
77
78 uint _young_list_target_length;
79 uint _young_list_fixed_length;
80
81 // The max number of regions we can extend the eden by while the GC
82 // locker is active. This should be >= _young_list_target_length.
83 uint _young_list_max_length;
84
85 // SurvRateGroups below must be initialized after the predictor because they
86 // indirectly use it through this object passed to their constructor.
87 SurvRateGroup* _eden_surv_rate_group;
88 SurvRateGroup* _survivor_surv_rate_group;
89
90 double _reserve_factor;
91 // This will be set when the heap is expanded
92 // for the first time during initialization.
93 uint _reserve_regions;
94
95 G1YoungGenSizer* _young_gen_sizer;
96
97 uint _free_regions_at_end_of_collection;
98
// Most recent remembered set length recorded via record_rs_length().
99 size_t _rs_length;
100
// Predicted remembered set length, updated by update_rs_length_prediction().
101 size_t _rs_length_prediction;
102
103 size_t _pending_cards_at_gc_start;
104 size_t _pending_cards_at_prev_gc_end;
105 size_t _total_mutator_refined_cards;
106 size_t _total_concurrent_refined_cards;
107 Tickspan _total_concurrent_refinement_time;
// Running total maintained by add_bytes_allocated_in_old_since_last_gc().
111 size_t _bytes_allocated_in_old_since_last_gc;
112
113 G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
114
// Survivor rate predictors are refreshed only during the young-only phase,
// and never while marking or remembered set rebuilding is in progress.
115 bool should_update_surv_rate_group_predictors() {
116 return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
117 }
118
119 double logged_cards_processing_time() const;
120 public:
// Read-only access to the shared predictor (also used indirectly by the
// survivor rate groups - see the comment above their declarations).
121 const G1Predictions& predictor() const { return _predictor; }
// Read-only view of the GC analytics. No cast is needed: converting
// G1Analytics* to const G1Analytics* is an implicit qualification
// conversion, so the previous const_cast was redundant.
122 const G1Analytics* analytics() const { return _analytics; }
123
124 G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }
125
126 // Add the given number of bytes to the total number of allocated bytes in the old gen.
127 void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
128
// Tag hr as an eden region and attach the eden survivor rate group to it.
129 void set_region_eden(HeapRegion* hr) {
130 hr->set_eden();
131 hr->install_surv_rate_group(_eden_surv_rate_group);
132 }
133
// Attach the survivor rate group to hr; unlike set_region_eden() the
// region must already be tagged as a survivor (asserted below).
134 void set_region_survivor(HeapRegion* hr) {
135 assert(hr->is_survivor(), "pre-condition");
136 hr->install_surv_rate_group(_survivor_surv_rate_group);
137 }
138
// Record the measured remembered set length for the current pause.
139 void record_rs_length(size_t rs_length) {
140 _rs_length = rs_length;
141 }
142
// Predict the "base" (region-independent) pause time from the number of
// pending cards.
143 double predict_base_elapsed_time_ms(size_t num_pending_cards) const;
144
145 private:
// As above, but additionally accounting for the given remembered set length.
146 double predict_base_elapsed_time_ms(size_t num_pending_cards, size_t rs_length) const;
147
148 double predict_region_copy_time_ms(HeapRegion* hr) const;
149
150 public:
151
// Predict copy time for `count` eden regions; if bytes_to_copy is non-NULL
// it presumably receives the predicted byte count - TODO confirm in the .cpp.
152 double predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy = NULL) const;
153 double predict_region_non_copy_time_ms(HeapRegion* hr, bool for_young_gc) const;
154 double predict_region_total_time_ms(HeapRegion* hr, bool for_young_gc) const;
155
// Called when collection set regions have been freed: fold the recorded
// surviving words into the eden and survivor rate groups. Predictors are
// refreshed only when should_update_surv_rate_group_predictors() allows it.
156 void cset_regions_freed() {
157 bool update = should_update_surv_rate_group_predictors();
158
159 _eden_surv_rate_group->all_surviving_words_recorded(predictor(), update);
160 _survivor_surv_rate_group->all_surviving_words_recorded(predictor(), update);
161 }
162
163 G1MMUTracker* mmu_tracker() {
164 return _mmu_tracker;
165 }
166
167 const G1MMUTracker* mmu_tracker() const {
168 return _mmu_tracker;
169 }
170
// Pause time goal in milliseconds; max_gc_time() appears to be in seconds
// given the * 1000.0 conversion - confirm against G1MMUTracker.
171 double max_pause_time_ms() const {
172 return _mmu_tracker->max_gc_time() * 1000.0;
173 }
174
175 private:
176 G1CollectionSet* _collection_set;
// Helpers breaking a pause time into components; NOTE(review): presumably
// derived from _phase_times - confirm in the .cpp.
177 double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
178 double other_time_ms(double pause_time_ms) const;
179
180 double young_other_time_ms() const;
181 double non_young_other_time_ms() const;
182 double constant_other_time_ms(double pause_time_ms) const;
183
184 G1CollectionSetChooser* cset_chooser() const;
185
186 // Stash a pointer to the g1 heap.
187 G1CollectedHeap* _g1h;
188
189 G1GCPhaseTimes* _phase_times;
190
191 // This set of variables tracks the collector efficiency, in order to
192 // determine whether we should initiate a new marking.
193 double _mark_remark_start_sec;
194 double _mark_cleanup_start_sec;
217 uint calculate_young_list_desired_max_length() const;
218
219 // Calculate and return the maximum young list target length that
220 // can fit into the pause time goal. The parameters are: rs_length
221 // represents the prediction of how large the young RSet lengths will
222 // be, base_min_length is the already existing number of regions in
223 // the young list, desired_min_length and desired_max_length are the
224 // desired min and max young list length according to the user's inputs.
225 uint calculate_young_list_target_length(size_t rs_length,
226 uint base_min_length,
227 uint desired_min_length,
228 uint desired_max_length) const;
229
230 // Result of the young_list_target_lengths() method below, containing both
231 // the bounded as well as the unbounded young list target lengths in this order.
232 typedef Pair<uint, uint, StackObj> YoungTargetLengths;
233 YoungTargetLengths young_list_target_lengths(size_t rs_length) const;
234
235 void update_rs_length_prediction();
236 void update_rs_length_prediction(size_t prediction);
237
238 size_t predict_bytes_to_copy(HeapRegion* hr) const;
239 double predict_survivor_regions_evac_time() const;
240
241 // Check whether a given young length (young_length) fits into the
242 // given target pause time and whether the prediction for the amount
243 // of objects to be copied for the given length will fit into the
244 // given free space (expressed by base_free_regions). It is used by
245 // calculate_young_list_target_length().
246 bool predict_will_fit(uint young_length, double base_time_ms,
247 uint base_free_regions, double target_pause_time_ms) const;
248
249 public:
// Number of pending cards recorded at the start of the current GC.
250 size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; }
251
252 // Calculate the minimum number of old regions we'll add to the CSet
253 // during a mixed GC.
254 uint calc_min_old_cset_length() const;
255
256 // Calculate the maximum number of old regions we'll add to the CSet
257 // during a mixed GC.
258 uint calc_max_old_cset_length() const;
259
|