1 /*
2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
26 #define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
27
28 #include "gc/g1/g1CollectorState.hpp"
29 #include "gc/g1/g1GCPhaseTimes.hpp"
30 #include "gc/g1/g1InCSetState.hpp"
31 #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
32 #include "gc/g1/g1MMUTracker.hpp"
33 #include "gc/g1/g1Predictions.hpp"
34 #include "gc/shared/collectorPolicy.hpp"
35 #include "utilities/pair.hpp"
36
37 // A G1CollectorPolicy makes policy decisions that determine the
38 // characteristics of the collector. Examples include:
39 // * choice of collection set.
40 // * when to collect.
41
42 class HeapRegion;
43 class G1CollectionSet;
44 class CollectionSetChooser;
45 class G1IHOPControl;
46 class G1YoungGenSizer;
47
class G1CollectorPolicy: public CollectorPolicy {
private:
  // Controller for the Initiating Heap Occupancy Percent (IHOP) threshold
  // that decides when concurrent marking should be started.
  G1IHOPControl* _ihop_control;

  G1IHOPControl* create_ihop_control() const;
  // Update the IHOP control with necessary statistics.
  void update_ihop_prediction(double mutator_time_s,
                              size_t mutator_alloc_bytes,
                              size_t young_gen_size);
  void report_ihop_statistics();

  // Predictor used to extrapolate future values from the TruncatedSeq
  // sample sequences kept below.
  G1Predictions _predictor;

  // Return a prediction for the next value of the given sequence
  // (size variant clamps/converts to size_t).
  double get_new_prediction(TruncatedSeq const* seq) const;
  size_t get_new_size_prediction(TruncatedSeq const* seq) const;

  // Tracker for the Minimum Mutator Utilization (MMU) pause-time goal.
  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  // Start time (in s) of the full collection currently in progress.
  double _full_collection_start_sec;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  // Durations (in ms) of recent concurrent mark remark and cleanup pauses.
  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  // Ratio check data for determining if heap growth is necessary.
  uint _ratio_over_threshold_count;
  double _ratio_over_threshold_sum;
  uint _pauses_since_start;

  // Target young list length (in regions), and the fixed length used when
  // the young generation size is not adaptive.
  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  // Survivor rate groups: one for eden ("short lived") regions and one for
  // survivor regions.
  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  // Allowed GC time as a percentage of elapsed time; presumably derived
  // from GCTimeRatio — confirm in the implementation file.
  double _gc_overhead_perc;

  // Fraction of the heap (and the resulting number of regions) held back
  // as an evacuation reserve.
  double _reserve_factor;
  uint _reserve_regions;

  enum PredictionConstants {
    TruncatedSeqLength = 10,
    NumPrevPausesForHeuristics = 10,
    // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,
    // representing the minimum number of pause time ratios that exceed
    // GCTimeRatio before a heap expansion will be triggered.
    MinOverThresholdForGrowth = 4
  };

  // Recent mutator allocation rates, and the end time (in ms) of the
  // previous collection pause.
  TruncatedSeq* _alloc_rate_ms_seq;
  double _prev_collection_pause_end_ms;

  // Sample sequences feeding the pause-time prediction model. The
  // "young"/"mixed" pairs track the two GC kinds separately; unqualified
  // names are the young-GC variants.
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  // Copy cost per byte (in ms) observed while concurrent marking is running.
  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  // Sizing policy for the young generation.
  G1YoungGenSizer* _young_gen_sizer;

  uint _free_regions_at_end_of_collection;

  // Maximum remembered-set length recorded for the current pause, and the
  // current RS length prediction it is compared against.
  size_t _max_rs_lengths;

  size_t _rs_lengths_prediction;

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  // Adjust concurrent refinement activity based on the observed remembered
  // set update time and the given goal (in ms).
  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  // Pause time goal (in ms) used by the prediction model.
  double _pause_time_target_ms;

  // Number of pending (not yet refined) cards at the start of the pause.
  size_t _pending_cards;

  // The amount of allocated bytes in old gen during the last mutator and the following
  // young GC phase.
  size_t _bytes_allocated_in_old_since_last_gc;

  // Tracks the time from the last initial mark pause to the first mixed GC.
  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
  const G1Predictions& predictor() const { return _predictor; }

  // Add the given number of bytes to the total number of allocated bytes in the old gen.
  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

  // Accessors

  // Tag the given region as eden and attach it to the short-lived survivor
  // rate group under the given index within the collection set.
  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  // Attach an already-tagged survivor region to the survivor rate group
  // under the given index within the collection set.
  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  // Record the maximum RSet length observed for the current pause.
  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  // Predictions of the individual pause components, based on the sample
  // sequences above. All times are in ms unless the name says otherwise.
  size_t predict_rs_lengths() const;

  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_young_card_num(size_t rs_length) const;

  size_t predict_non_young_card_num(size_t rs_length) const;

  double predict_rs_scan_time_ms(size_t card_num) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;

  double predict_base_elapsed_time_ms(size_t pending_cards) const;
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards) const;
  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;

  double predict_survivor_regions_evac_time() const;

  // Survivor rate predictors are only updated after a young GC that did not
  // occur within a marking window.
  bool should_update_surv_rate_group_predictors() {
    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
  }

  // Notification that the collection set regions have been freed; flush the
  // recorded surviving words into the survivor rate groups.
  void cset_regions_freed() {
    bool update = should_update_surv_rate_group_predictors();

    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
    _survivor_surv_rate_group->all_surviving_words_recorded(update);
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  const G1MMUTracker* mmu_tracker() const {
    return _mmu_tracker;
  }

  // Maximum allowed GC pause time in ms (max_gc_time() reports seconds).
  double max_pause_time_ms() const {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;

  double predict_yg_surv_rate(int age) const;

  // Accumulated survival rate prediction up to and including the given age.
  double accum_yg_surv_rate_pred(int age) const;

protected:
  G1CollectionSet* _collection_set;
  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
  virtual double other_time_ms(double pause_time_ms) const;

  double young_other_time_ms() const;
  double non_young_other_time_ms() const;
  double constant_other_time_ms(double pause_time_ms) const;

  CollectionSetChooser* cset_chooser() const;
private:
  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses,
  // and the ratio for just the last pause.
  double _recent_avg_pause_time_ratio;
  double _last_pause_time_ratio;

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Updates the internal young list maximum and target lengths. Returns the
  // unbounded young list target length.
  uint update_young_list_max_and_target_length();
  uint update_young_list_max_and_target_length(size_t rs_lengths);

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  // Returns the unbounded young list target length.
  uint update_young_list_target_length(size_t rs_lengths);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length) const;

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length() const;

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represent the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, min_length and max_length are the desired min and
  // max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length) const;

  // Result of the bounded_young_list_target_length() method, containing both the
  // bounded as well as the unbounded young list target lengths in this order.
  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;

  void update_rs_lengths_prediction();
  void update_rs_lengths_prediction(size_t prediction);

  // Calculate and return chunk size (in number of regions) for parallel
  // concurrent mark cleanup.
  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;

public:
  size_t pending_cards() const { return _pending_cards; }

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length() const;

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;

private:
  // Sets up marking if proper conditions are met.
  void maybe_start_marking();

  // The kind of STW pause.
  enum PauseKind {
    FullGC,
    YoungOnlyGC,
    MixedGC,
    LastYoungGC,
    InitialMarkGC,
    Cleanup,
    Remark
  };

  // Calculate PauseKind from internal state.
  PauseKind young_gc_pause_kind() const;
  // Record the given STW pause with the given start and end times (in s).
  void record_pause(PauseKind kind, double start, double end);
  // Indicate that we aborted marking before doing any mixed GCs.
  void abort_time_to_mixed_tracking();
public:

  G1CollectorPolicy();

  virtual ~G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary(size_t rs_lengths);

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  virtual void note_gc_start(uint num_active_workers);

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  // Whether a concurrent mark cycle needs to be started, e.g. after an
  // allocation of alloc_word_size; "source" presumably identifies the
  // caller for logging — confirm in the implementation file.
  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  bool about_to_start_mixed_phase() const;

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();
  void record_concurrent_mark_cleanup_completed();

  virtual void print_phases();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() const {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;

  virtual void finalize_collection_set(double target_pause_time_ms);
private:
  // Set the state to start a concurrent marking cycle and clear
  // _initiate_conc_mark_if_possible because it has now been
  // acted on.
  void initiate_conc_mark();

public:
  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Clear ratio tracking data used by expansion_amount().
  void clear_ratio_check_data();

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  // Notify the appropriate survivor rate group that recalculation of its
  // age indexes has finished.
  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full() const;

  bool can_expand_young_list() const;

  uint young_list_max_length() const {
    return _young_list_max_length;
  }

  bool adaptive_young_list_length() const;

  virtual bool should_process_references() const {
    return true;
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivors regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // Accumulated age table; record_age_table() merges caller-supplied
  // tables into it.
  AgeTable _survivors_age_table;

public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  uint max_survivor_regions() {
    return _max_survivor_regions;
  }

  // Sentinel meaning "no limit on the number of regions".
  static const uint REGIONS_UNLIMITED = (uint) -1;

  // Maximum number of regions that may be allocated for the given
  // destination state: survivors are capped, old gen is unlimited.
  uint max_regions(InCSetState dest) const {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
        break;
    }
    // keep some compilers happy
    return 0;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  // Merge a (typically per-worker) age table into the accumulated one.
  void record_age_table(AgeTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};
569
570 #endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
|
1 /*
2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
26 #define SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
27
28 #include "memory/allocation.hpp"
29 #include "utilities/globalDefinitions.hpp"
30
31 class TruncatedSeq;
32 class G1Predictions;
33
// Collects the runtime measurements (as TruncatedSeq sample sequences) that
// feed G1's pause-time prediction model, and exposes predictions derived
// from them via the supplied G1Predictions instance.
class G1Measurements: public CHeapObj<mtGC> {
  // Number of samples retained in each TruncatedSeq.
  const static int TruncatedSeqLength = 10;
  // Number of previous pauses considered by the pause-time-ratio statistics.
  const static int NumPrevPausesForHeuristics = 10;
  // Predictor used to extrapolate future values from the sequences below.
  G1Predictions* _predictor;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  // Durations (in ms) of recent concurrent mark remark and cleanup pauses.
  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  // Recent allocation rates, and the end time (in ms) of the previous
  // collection pause.
  TruncatedSeq* _alloc_rate_ms_seq;
  double _prev_collection_pause_end_ms;

  // Sample sequences for the individual pause components. The
  // "young"/"mixed" pairs track the two GC kinds separately; unqualified
  // names are the young-GC variants.
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  // Copy cost per byte (in ms) observed while concurrent marking is running.
  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // The ratio of gc time to elapsed time, computed over recent pauses,
  // and the ratio for just the last pause.
  double _recent_avg_pause_time_ratio;
  double _last_pause_time_ratio;

  // Return a prediction for the next value of the given sequence
  // (size variant clamps/converts to size_t).
  double get_new_prediction(TruncatedSeq const* seq) const;
  size_t get_new_size_prediction(TruncatedSeq const* seq) const;

  // Recompute the pause time ratios from the given mutator interval and
  // pause duration (both in ms).
  void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
public:
  G1Measurements(G1Predictions* predictor);

  double prev_collection_pause_end_ms() const {
    return _prev_collection_pause_end_ms;
  }

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  double last_pause_time_ratio() const {
    return _last_pause_time_ratio;
  }

  // Advance the recorded end time of the previous pause by the given amount.
  void append_prev_collection_pause_end_ms(double ms) {
    _prev_collection_pause_end_ms += ms;
  }

  // Report a new sample for the corresponding measurement sequence. The
  // bool parameters select which of the young/mixed (or in/out of marking
  // window) variants receives the sample.
  void report_concurrent_mark_remark_times_ms(double ms);
  void report_concurrent_mark_cleanup_times_ms(double ms);
  void report_alloc_rate_ms(double alloc_rate);
  void report_cost_per_card_ms(double cost_per_card_ms);
  void report_cost_scan_hcc(double cost_scan_hcc);
  void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young);
  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young);
  void report_rs_length_diff(double rs_length_diff);
  void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window);
  void report_young_other_cost_per_region_ms(double other_cost_per_region_ms);
  void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
  void report_constant_other_time_ms(double constant_other_time_ms);
  void report_pending_cards(double pending_cards);
  void report_rs_lengths(double rs_lengths);

  // Predictions derived from the reported samples; all times are in ms
  // unless the name says otherwise.
  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;
  int num_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_card_num(size_t rs_length, bool gcs_are_young) const;

  double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;

  size_t predict_rs_lengths() const;
  size_t predict_pending_cards() const;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);
};
152
153 #endif // SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
|