rev 3905 : 8001424: G1: Rename certain G1-specific flags
Summary: Rename G1DefaultMinNewGenPercent, G1DefaultMaxNewGenPercent, and G1OldCSetRegionLiveThresholdPercent to G1NewSizePercent, G1MaxNewSizePercent, and G1MixedGCLiveThresholdPercent respectively. Continue, however, to accept the previous names. If both are specified, the new name takes precedence.
Reviewed-by:
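The summary above notes that the old flag names remain accepted and that the new name wins when both are given. Below is a minimal standalone sketch of that precedence rule; the struct and function names are illustrative stand-ins, not the actual HotSpot argument-processing code.

    #include <cstdio>

    // Illustrative stand-in for a VM flag: its value plus whether it was set
    // explicitly on the command line (as opposed to keeping its default).
    struct Flag {
      unsigned value;
      bool set_on_cmd_line;
    };

    // Precedence rule described in the summary: an explicitly set new flag wins,
    // otherwise an explicitly set old (renamed) flag is honored, otherwise the
    // built-in default is used.
    unsigned effective_percent(const Flag& new_flag, const Flag& old_flag,
                               unsigned default_value) {
      if (new_flag.set_on_cmd_line) return new_flag.value;
      if (old_flag.set_on_cmd_line) return old_flag.value;
      return default_value;
    }

    int main() {
      Flag new_name = {10, true};  // e.g. -XX:G1NewSizePercent=10
      Flag old_name = {20, true};  // e.g. -XX:G1DefaultMinNewGenPercent=20
      // Both specified, so the new name takes precedence: prints 10.
      printf("%u\n", effective_percent(new_name, old_name, 5));
      return 0;
    }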
--- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
27 27
28 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
29 29 #include "gc_implementation/g1/g1MMUTracker.hpp"
30 30 #include "memory/collectorPolicy.hpp"
31 31
32 32 // A G1CollectorPolicy makes policy decisions that determine the
33 33 // characteristics of the collector. Examples include:
34 34 // * choice of collection set.
35 35 // * when to collect.
36 36
37 37 class HeapRegion;
38 38 class CollectionSetChooser;
39 39 class G1GCPhaseTimes;
40 40
41 41 // TraceGen0Time collects data on _both_ young and mixed evacuation pauses
42 42 // (the latter may contain non-young regions - i.e. regions that are
43 43 // technically in Gen1) while TraceGen1Time collects data about full GCs.
44 44 class TraceGen0TimeData : public CHeapObj<mtGC> {
45 45 private:
46 46 unsigned _young_pause_num;
47 47 unsigned _mixed_pause_num;
48 48
49 49 NumberSeq _all_stop_world_times_ms;
50 50 NumberSeq _all_yield_times_ms;
51 51
52 52 NumberSeq _total;
53 53 NumberSeq _other;
54 54 NumberSeq _root_region_scan_wait;
55 55 NumberSeq _parallel;
56 56 NumberSeq _ext_root_scan;
57 57 NumberSeq _satb_filtering;
58 58 NumberSeq _update_rs;
59 59 NumberSeq _scan_rs;
60 60 NumberSeq _obj_copy;
61 61 NumberSeq _termination;
62 62 NumberSeq _parallel_other;
63 63 NumberSeq _clear_ct;
64 64
65 65 void print_summary(const char* str, const NumberSeq* seq) const;
66 66 void print_summary_sd(const char* str, const NumberSeq* seq) const;
67 67
68 68 public:
69 69 TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
70 70 void record_start_collection(double time_to_stop_the_world_ms);
71 71 void record_yield_time(double yield_time_ms);
72 72 void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
73 73 void increment_young_collection_count();
74 74 void increment_mixed_collection_count();
75 75 void print() const;
76 76 };
77 77
78 78 class TraceGen1TimeData : public CHeapObj<mtGC> {
79 79 private:
80 80 NumberSeq _all_full_gc_times;
81 81
82 82 public:
83 83 void record_full_collection(double full_gc_time_ms);
84 84 void print() const;
85 85 };
86 86
87 87 // There are three command line options related to the young gen size:
88 88 // NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
89 89 // just a short form for NewSize==MaxNewSize). G1 will use its internal
90 90 // heuristics to calculate the actual young gen size, so these options
91 91 // basically only limit the range within which G1 can pick a young gen
92 92 // size. Also, these are general options taking byte sizes. G1 will
93 93 // internally work with a number of regions instead. So, some rounding
94 94 // will occur.
95 95 //
96 96 // If nothing related to the young gen size is set on the command
97 -// line we should allow the young gen to be between
98 -// G1DefaultMinNewGenPercent and G1DefaultMaxNewGenPercent of the
99 -// heap size. This means that every time the heap size changes the
100 -// limits for the young gen size will be updated.
97 +// line we should allow the young gen to be between G1NewSizePercent
98 +// and G1MaxNewSizePercent of the heap size. This means that every time
99 +// the heap size changes, the limits for the young gen size will be
100 +// recalculated.
101 101 //
102 102 // If only -XX:NewSize is set we should use the specified value as the
103 -// minimum size for young gen. Still using G1DefaultMaxNewGenPercent
104 -// of the heap as maximum.
103 +// minimum size for young gen. Still using G1MaxNewSizePercent of the
104 +// heap as maximum.
105 105 //
106 106 // If only -XX:MaxNewSize is set we should use the specified value as the
107 -// maximum size for young gen. Still using G1DefaultMinNewGenPercent
108 -// of the heap as minimum.
107 +// maximum size for young gen. Still using G1NewSizePercent of the heap
108 +// as minimum.
109 109 //
110 110 // If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
111 111 // No updates when the heap size changes. There is a special case when
112 112 // NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
113 113 // different heuristic for calculating the collection set when we do mixed
114 114 // collection.
115 115 //
116 116 // If only -XX:NewRatio is set we should use the specified ratio of the heap
117 117 // as both min and max. This will be interpreted as "fixed" just like the
118 118 // NewSize==MaxNewSize case above. But we will update the min and max
119 119 // every time the heap size changes.
120 120 //
121 121 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
122 122 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
123 123 class G1YoungGenSizer : public CHeapObj<mtGC> {
124 124 private:
125 125 enum SizerKind {
126 126 SizerDefaults,
127 127 SizerNewSizeOnly,
128 128 SizerMaxNewSizeOnly,
129 129 SizerMaxAndNewSize,
130 130 SizerNewRatio
131 131 };
132 132 SizerKind _sizer_kind;
133 133 uint _min_desired_young_length;
134 134 uint _max_desired_young_length;
135 135 bool _adaptive_size;
136 136 uint calculate_default_min_length(uint new_number_of_heap_regions);
137 137 uint calculate_default_max_length(uint new_number_of_heap_regions);
138 138
139 139 public:
140 140 G1YoungGenSizer();
141 141 void heap_size_changed(uint new_number_of_heap_regions);
142 142 uint min_desired_young_length() {
143 143 return _min_desired_young_length;
144 144 }
145 145 uint max_desired_young_length() {
146 146 return _max_desired_young_length;
147 147 }
148 148 bool adaptive_young_list_length() {
149 149 return _adaptive_size;
150 150 }
151 151 };
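The comment block above explains that, absent NewSize/MaxNewSize/NewRatio, the young gen is kept between G1NewSizePercent and G1MaxNewSizePercent of the heap and recomputed whenever the heap size changes. A minimal standalone sketch of that calculation follows; the percentage constants and helper names are assumptions for illustration, not the G1YoungGenSizer implementation.

    #include <algorithm>
    #include <cstdio>

    // Assumed stand-ins for G1NewSizePercent / G1MaxNewSizePercent.
    const unsigned kNewSizePercent    = 5;
    const unsigned kMaxNewSizePercent = 60;

    // Young gen bounds expressed in whole heap regions, never less than one
    // region; rounding to regions is why the byte-sized options are only
    // approximate limits.
    unsigned min_young_regions(unsigned heap_regions) {
      return std::max(1u, heap_regions * kNewSizePercent / 100);
    }
    unsigned max_young_regions(unsigned heap_regions) {
      return std::max(1u, heap_regions * kMaxNewSizePercent / 100);
    }

    int main() {
      // For a 2048-region heap this allows 102 to 1228 young regions; the
      // bounds would be recomputed after every heap resize.
      unsigned regions = 2048;
      printf("min=%u max=%u\n", min_young_regions(regions), max_young_regions(regions));
      return 0;
    }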
152 152
153 153 class G1CollectorPolicy: public CollectorPolicy {
154 154 private:
155 155 // either equal to the number of parallel threads, if ParallelGCThreads
156 156 // has been set, or 1 otherwise
157 157 int _parallel_gc_threads;
158 158
159 159 // The number of GC threads currently active.
160 160 uintx _no_of_gc_threads;
161 161
162 162 enum SomePrivateConstants {
163 163 NumPrevPausesForHeuristics = 10
164 164 };
165 165
166 166 G1MMUTracker* _mmu_tracker;
167 167
168 168 void initialize_flags();
169 169
170 170 void initialize_all() {
171 171 initialize_flags();
172 172 initialize_size_info();
173 173 }
174 174
175 175 CollectionSetChooser* _collectionSetChooser;
176 176
177 177 double _full_collection_start_sec;
178 178 size_t _cur_collection_pause_used_at_start_bytes;
179 179 uint _cur_collection_pause_used_regions_at_start;
180 180
181 181 // These exclude marking times.
182 182 TruncatedSeq* _recent_gc_times_ms;
183 183
184 184 TruncatedSeq* _concurrent_mark_remark_times_ms;
185 185 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
186 186
187 187 TraceGen0TimeData _trace_gen0_time_data;
188 188 TraceGen1TimeData _trace_gen1_time_data;
189 189
190 190 double _stop_world_start;
191 191
192 192 // indicates whether we are in young or mixed GC mode
193 193 bool _gcs_are_young;
194 194
195 195 uint _young_list_target_length;
196 196 uint _young_list_fixed_length;
197 197 size_t _prev_eden_capacity; // used for logging
198 198
199 199 // The max number of regions we can extend the eden by while the GC
200 200 // locker is active. This should be >= _young_list_target_length;
201 201 uint _young_list_max_length;
202 202
203 203 bool _last_gc_was_young;
204 204
205 205 bool _during_marking;
206 206 bool _in_marking_window;
207 207 bool _in_marking_window_im;
208 208
209 209 SurvRateGroup* _short_lived_surv_rate_group;
210 210 SurvRateGroup* _survivor_surv_rate_group;
211 211 // add here any more surv rate groups
212 212
213 213 double _gc_overhead_perc;
214 214
215 215 double _reserve_factor;
216 216 uint _reserve_regions;
217 217
218 218 bool during_marking() {
219 219 return _during_marking;
220 220 }
221 221
222 222 private:
223 223 enum PredictionConstants {
224 224 TruncatedSeqLength = 10
225 225 };
226 226
227 227 TruncatedSeq* _alloc_rate_ms_seq;
228 228 double _prev_collection_pause_end_ms;
229 229
230 230 TruncatedSeq* _rs_length_diff_seq;
231 231 TruncatedSeq* _cost_per_card_ms_seq;
232 232 TruncatedSeq* _young_cards_per_entry_ratio_seq;
233 233 TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
234 234 TruncatedSeq* _cost_per_entry_ms_seq;
235 235 TruncatedSeq* _mixed_cost_per_entry_ms_seq;
236 236 TruncatedSeq* _cost_per_byte_ms_seq;
237 237 TruncatedSeq* _constant_other_time_ms_seq;
238 238 TruncatedSeq* _young_other_cost_per_region_ms_seq;
239 239 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
240 240
241 241 TruncatedSeq* _pending_cards_seq;
242 242 TruncatedSeq* _rs_lengths_seq;
243 243
244 244 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
245 245
246 246 G1YoungGenSizer* _young_gen_sizer;
247 247
248 248 uint _eden_cset_region_length;
249 249 uint _survivor_cset_region_length;
250 250 uint _old_cset_region_length;
251 251
252 252 void init_cset_region_lengths(uint eden_cset_region_length,
253 253 uint survivor_cset_region_length);
254 254
255 255 uint eden_cset_region_length() { return _eden_cset_region_length; }
256 256 uint survivor_cset_region_length() { return _survivor_cset_region_length; }
257 257 uint old_cset_region_length() { return _old_cset_region_length; }
258 258
259 259 uint _free_regions_at_end_of_collection;
260 260
261 261 size_t _recorded_rs_lengths;
262 262 size_t _max_rs_lengths;
263 263 double _sigma;
264 264
265 265 size_t _rs_lengths_prediction;
266 266
267 267 double sigma() { return _sigma; }
268 268
269 269 // A function that prevents us putting too much stock in small sample
270 270 // sets. Returns a number between 2.0 and 1.0, depending on the number
271 271 // of samples. 5 or more samples yields one; fewer scales linearly from
272 272 // 2.0 at 1 sample to 1.0 at 5.
273 273 double confidence_factor(int samples) {
274 274 if (samples > 4) return 1.0;
275 275 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
276 276 }
277 277
278 278 double get_new_neg_prediction(TruncatedSeq* seq) {
279 279 return seq->davg() - sigma() * seq->dsd();
280 280 }
281 281
282 282 #ifndef PRODUCT
283 283 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
284 284 #endif // PRODUCT
285 285
286 286 void adjust_concurrent_refinement(double update_rs_time,
287 287 double update_rs_processed_buffers,
288 288 double goal_ms);
289 289
290 290 uintx no_of_gc_threads() { return _no_of_gc_threads; }
291 291 void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
292 292
293 293 double _pause_time_target_ms;
294 294
295 295 size_t _pending_cards;
296 296
297 297 public:
298 298 // Accessors
299 299
300 300 void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
301 301 hr->set_young();
302 302 hr->install_surv_rate_group(_short_lived_surv_rate_group);
303 303 hr->set_young_index_in_cset(young_index_in_cset);
304 304 }
305 305
306 306 void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
307 307 assert(hr->is_young() && hr->is_survivor(), "pre-condition");
308 308 hr->install_surv_rate_group(_survivor_surv_rate_group);
309 309 hr->set_young_index_in_cset(young_index_in_cset);
310 310 }
311 311
312 312 #ifndef PRODUCT
313 313 bool verify_young_ages();
314 314 #endif // PRODUCT
315 315
316 316 double get_new_prediction(TruncatedSeq* seq) {
317 317 return MAX2(seq->davg() + sigma() * seq->dsd(),
318 318 seq->davg() * confidence_factor(seq->num()));
319 319 }
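get_new_prediction() above pads the decaying average by sigma standard deviations, but never lets the estimate fall below the average scaled by confidence_factor(), which widens the result when fewer than five samples exist. A standalone numeric sketch of the same arithmetic, with purely illustrative values for the average, deviation and sigma:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Illustrative inputs: decaying average 8.0 ms, decaying std dev 2.0 ms,
      // sigma taken as 1.0 here, and only 3 samples recorded so far.
      double davg = 8.0, dsd = 2.0, sigma = 1.0;
      int samples = 3;

      // Mirrors confidence_factor(): 1.0 once there are 5+ samples, otherwise
      // 1.0 + sigma * (5 - samples) / 2.0, i.e. 2.0 for 3 samples here.
      double confidence = (samples > 4) ? 1.0
                                        : 1.0 + sigma * (double)(5 - samples) / 2.0;

      // Mirrors get_new_prediction(): max(8.0 + 1.0*2.0, 8.0 * 2.0) = 16.0 ms,
      // so the small sample count dominates the prediction.
      double prediction = std::max(davg + sigma * dsd, davg * confidence);
      printf("prediction = %.1f ms\n", prediction);
      return 0;
    }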
320 320
321 321 void record_max_rs_lengths(size_t rs_lengths) {
322 322 _max_rs_lengths = rs_lengths;
323 323 }
324 324
325 325 size_t predict_rs_length_diff() {
326 326 return (size_t) get_new_prediction(_rs_length_diff_seq);
327 327 }
328 328
329 329 double predict_alloc_rate_ms() {
330 330 return get_new_prediction(_alloc_rate_ms_seq);
331 331 }
332 332
333 333 double predict_cost_per_card_ms() {
334 334 return get_new_prediction(_cost_per_card_ms_seq);
335 335 }
336 336
337 337 double predict_rs_update_time_ms(size_t pending_cards) {
338 338 return (double) pending_cards * predict_cost_per_card_ms();
339 339 }
340 340
341 341 double predict_young_cards_per_entry_ratio() {
342 342 return get_new_prediction(_young_cards_per_entry_ratio_seq);
343 343 }
344 344
345 345 double predict_mixed_cards_per_entry_ratio() {
346 346 if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
347 347 return predict_young_cards_per_entry_ratio();
348 348 } else {
349 349 return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
350 350 }
351 351 }
352 352
353 353 size_t predict_young_card_num(size_t rs_length) {
354 354 return (size_t) ((double) rs_length *
355 355 predict_young_cards_per_entry_ratio());
356 356 }
357 357
358 358 size_t predict_non_young_card_num(size_t rs_length) {
359 359 return (size_t) ((double) rs_length *
360 360 predict_mixed_cards_per_entry_ratio());
361 361 }
362 362
363 363 double predict_rs_scan_time_ms(size_t card_num) {
364 364 if (gcs_are_young()) {
365 365 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
366 366 } else {
367 367 return predict_mixed_rs_scan_time_ms(card_num);
368 368 }
369 369 }
370 370
371 371 double predict_mixed_rs_scan_time_ms(size_t card_num) {
372 372 if (_mixed_cost_per_entry_ms_seq->num() < 3) {
373 373 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
374 374 } else {
375 375 return (double) (card_num *
376 376 get_new_prediction(_mixed_cost_per_entry_ms_seq));
377 377 }
378 378 }
379 379
380 380 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
381 381 if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
382 382 return (1.1 * (double) bytes_to_copy) *
383 383 get_new_prediction(_cost_per_byte_ms_seq);
384 384 } else {
385 385 return (double) bytes_to_copy *
386 386 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
387 387 }
388 388 }
389 389
390 390 double predict_object_copy_time_ms(size_t bytes_to_copy) {
391 391 if (_in_marking_window && !_in_marking_window_im) {
392 392 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
393 393 } else {
394 394 return (double) bytes_to_copy *
395 395 get_new_prediction(_cost_per_byte_ms_seq);
396 396 }
397 397 }
398 398
399 399 double predict_constant_other_time_ms() {
400 400 return get_new_prediction(_constant_other_time_ms_seq);
401 401 }
402 402
403 403 double predict_young_other_time_ms(size_t young_num) {
404 404 return (double) young_num *
405 405 get_new_prediction(_young_other_cost_per_region_ms_seq);
406 406 }
407 407
408 408 double predict_non_young_other_time_ms(size_t non_young_num) {
409 409 return (double) non_young_num *
410 410 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
411 411 }
412 412
413 413 double predict_base_elapsed_time_ms(size_t pending_cards);
414 414 double predict_base_elapsed_time_ms(size_t pending_cards,
415 415 size_t scanned_cards);
416 416 size_t predict_bytes_to_copy(HeapRegion* hr);
417 417 double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);
418 418
419 419 void set_recorded_rs_lengths(size_t rs_lengths);
420 420
421 421 uint cset_region_length() { return young_cset_region_length() +
422 422 old_cset_region_length(); }
423 423 uint young_cset_region_length() { return eden_cset_region_length() +
424 424 survivor_cset_region_length(); }
425 425
426 426 double predict_survivor_regions_evac_time();
427 427
428 428 void cset_regions_freed() {
429 429 bool propagate = _last_gc_was_young && !_in_marking_window;
430 430 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
431 431 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
432 432 // also call it on any more surv rate groups
433 433 }
434 434
435 435 G1MMUTracker* mmu_tracker() {
436 436 return _mmu_tracker;
437 437 }
438 438
439 439 double max_pause_time_ms() {
440 440 return _mmu_tracker->max_gc_time() * 1000.0;
441 441 }
442 442
443 443 double predict_remark_time_ms() {
444 444 return get_new_prediction(_concurrent_mark_remark_times_ms);
445 445 }
446 446
447 447 double predict_cleanup_time_ms() {
448 448 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
449 449 }
450 450
451 451 // Returns an estimate of the survival rate of the region at yg-age
452 452 // "yg_age".
453 453 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
454 454 TruncatedSeq* seq = surv_rate_group->get_seq(age);
455 455 if (seq->num() == 0)
456 456 gclog_or_tty->print("BARF! age is %d", age);
457 457 guarantee( seq->num() > 0, "invariant" );
458 458 double pred = get_new_prediction(seq);
459 459 if (pred > 1.0)
460 460 pred = 1.0;
461 461 return pred;
462 462 }
463 463
464 464 double predict_yg_surv_rate(int age) {
465 465 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
466 466 }
467 467
468 468 double accum_yg_surv_rate_pred(int age) {
469 469 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
470 470 }
471 471
472 472 private:
473 473 // Statistics kept per GC stoppage, pause or full.
474 474 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
475 475
476 476 // Add a new GC of the given duration and end time to the record.
477 477 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
478 478
479 479 // The head of the list (via "next_in_collection_set()") representing the
480 480 // current collection set. Set from the incrementally built collection
481 481 // set at the start of the pause.
482 482 HeapRegion* _collection_set;
483 483
484 484 // The number of bytes in the collection set before the pause. Set from
485 485 // the incrementally built collection set at the start of an evacuation
486 486 // pause, and incremented in finalize_cset() when adding old regions
487 487 // (if any) to the collection set.
488 488 size_t _collection_set_bytes_used_before;
489 489
490 490 // The number of bytes copied during the GC.
491 491 size_t _bytes_copied_during_gc;
492 492
493 493 // The associated information that is maintained while the incremental
494 494 // collection set is being built with young regions. Used to populate
495 495 // the recorded info for the evacuation pause.
496 496
497 497 enum CSetBuildType {
498 498 Active, // We are actively building the collection set
499 499 Inactive // We are not actively building the collection set
500 500 };
501 501
502 502 CSetBuildType _inc_cset_build_state;
503 503
504 504 // The head of the incrementally built collection set.
505 505 HeapRegion* _inc_cset_head;
506 506
507 507 // The tail of the incrementally built collection set.
508 508 HeapRegion* _inc_cset_tail;
509 509
510 510 // The number of bytes in the incrementally built collection set.
511 511 // Used to set _collection_set_bytes_used_before at the start of
512 512 // an evacuation pause.
513 513 size_t _inc_cset_bytes_used_before;
514 514
515 515 // Used to record the highest end of heap region in collection set
516 516 HeapWord* _inc_cset_max_finger;
517 517
518 518 // The RSet lengths recorded for regions in the CSet. It is updated
519 519 // by the thread that adds a new region to the CSet. We assume that
520 520 // only one thread can be allocating a new CSet region (currently,
521 521 // it does so after taking the Heap_lock) hence no need to
522 522 // synchronize updates to this field.
523 523 size_t _inc_cset_recorded_rs_lengths;
524 524
525 525 // A concurrent refinement thread periodically samples the young
526 526 // region RSets and needs to update _inc_cset_recorded_rs_lengths as
527 527 // the RSets grow. Instead of having to synchronize updates to that
528 528 // field we accumulate them in this field and add it to
529 529 // _inc_cset_recorded_rs_lengths at the start of a GC.
530 530 ssize_t _inc_cset_recorded_rs_lengths_diffs;
531 531
532 532 // The predicted elapsed time it will take to collect the regions in
533 533 // the CSet. This is updated by the thread that adds a new region to
534 534 // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
535 535 // MT-safety assumptions.
536 536 double _inc_cset_predicted_elapsed_time_ms;
537 537
538 538 // See the comment for _inc_cset_recorded_rs_lengths_diffs.
539 539 double _inc_cset_predicted_elapsed_time_ms_diffs;
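The *_diffs fields above follow a simple pattern: the sampling thread accumulates deltas in a side field, and those deltas are folded into the main field at the start of a GC, when no sampling is running. A minimal standalone sketch of that idea; the types and names are illustrative (the HotSpot fields are plain integers relying on the single-writer assumptions stated above, not std::atomic):

    #include <atomic>
    #include <cstdio>

    // Illustrative sketch of the "accumulate diffs, fold in at a safepoint"
    // pattern; not the HotSpot code.
    struct RecordedLengths {
      long recorded;               // only touched by the thread building the CSet
      std::atomic<long> diffs;     // updated by the sampling/refinement thread

      explicit RecordedLengths(long initial) : recorded(initial), diffs(0) {}

      // Called by the sampler when it notices an RSet has grown.
      void sample_grew_by(long delta) { diffs.fetch_add(delta); }

      // Called at the start of a GC, when the sampler is not running.
      void fold_in_diffs() { recorded += diffs.exchange(0); }
    };

    int main() {
      RecordedLengths rs_lengths(100);
      rs_lengths.sample_grew_by(7);
      rs_lengths.sample_grew_by(3);
      rs_lengths.fold_in_diffs();
      printf("%ld\n", rs_lengths.recorded);  // prints 110
      return 0;
    }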
540 540
541 541 // Stash a pointer to the g1 heap.
542 542 G1CollectedHeap* _g1;
543 543
544 544 G1GCPhaseTimes* _phase_times;
545 545
546 546 // The ratio of gc time to elapsed time, computed over recent pauses.
547 547 double _recent_avg_pause_time_ratio;
548 548
549 549 double recent_avg_pause_time_ratio() {
550 550 return _recent_avg_pause_time_ratio;
551 551 }
552 552
553 553 // At the end of a pause we check the heap occupancy and we decide
554 554 // whether we will start a marking cycle during the next pause. If
555 555 // we decide that we want to do that, we will set this parameter to
556 556 // true. So, this parameter will stay true between the end of a
557 557 // pause and the beginning of a subsequent pause (not necessarily
558 558 // the next one, see the comments on the next field) when we decide
559 559 // that we will indeed start a marking cycle and do the initial-mark
560 560 // work.
561 561 volatile bool _initiate_conc_mark_if_possible;
562 562
563 563 // If initiate_conc_mark_if_possible() is set at the beginning of a
564 564 // pause, it is a suggestion that the pause should start a marking
565 565 // cycle by doing the initial-mark work. However, it is possible
566 566 // that the concurrent marking thread is still finishing up the
567 567 // previous marking cycle (e.g., clearing the next marking
568 568 // bitmap). If that is the case we cannot start a new cycle and
569 569 // we'll have to wait for the concurrent marking thread to finish
570 570 // what it is doing. In this case we will postpone the marking cycle
571 571 // initiation decision for the next pause. When we eventually decide
572 572 // to start a cycle, we will set _during_initial_mark_pause which
573 573 // will stay true until the end of the initial-mark pause and it's
574 574 // the condition that indicates that a pause is doing the
575 575 // initial-mark work.
576 576 volatile bool _during_initial_mark_pause;
577 577
578 578 bool _last_young_gc;
579 579
580 580 // This set of variables tracks the collector efficiency, in order to
581 581 // determine whether we should initiate a new marking.
582 582 double _cur_mark_stop_world_time_ms;
583 583 double _mark_remark_start_sec;
584 584 double _mark_cleanup_start_sec;
585 585
586 586 // Update the young list target length either by setting it to the
587 587 // desired fixed value or by calculating it using G1's pause
588 588 // prediction model. If no rs_lengths parameter is passed, predict
589 589 // the RS lengths using the prediction model, otherwise use the
590 590 // given rs_lengths as the prediction.
591 591 void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
592 592
593 593 // Calculate and return the minimum desired young list target
594 594 // length. This is the minimum desired young list length according
595 595 // to the user's inputs.
596 596 uint calculate_young_list_desired_min_length(uint base_min_length);
597 597
598 598 // Calculate and return the maximum desired young list target
599 599 // length. This is the maximum desired young list length according
600 600 // to the user's inputs.
601 601 uint calculate_young_list_desired_max_length();
602 602
603 603 // Calculate and return the maximum young list target length that
604 604 // can fit into the pause time goal. The parameters are: rs_lengths
605 605 // represent the prediction of how large the young RSet lengths will
606 606 // be, base_min_length is the already existing number of regions in
607 607 // the young list, min_length and max_length are the desired min and
608 608 // max young list length according to the user's inputs.
609 609 uint calculate_young_list_target_length(size_t rs_lengths,
610 610 uint base_min_length,
611 611 uint desired_min_length,
612 612 uint desired_max_length);
613 613
614 614 // Check whether a given young length (young_length) fits into the
615 615 // given target pause time and whether the prediction for the amount
616 616 // of objects to be copied for the given length will fit into the
617 617 // given free space (expressed by base_free_regions). It is used by
618 618 // calculate_young_list_target_length().
619 619 bool predict_will_fit(uint young_length, double base_time_ms,
620 620 uint base_free_regions, double target_pause_time_ms);
621 621
622 622 public:
623 623
624 624 G1CollectorPolicy();
625 625
626 626 virtual G1CollectorPolicy* as_g1_policy() { return this; }
627 627
628 628 virtual CollectorPolicy::Name kind() {
629 629 return CollectorPolicy::G1CollectorPolicyKind;
630 630 }
631 631
632 632 G1GCPhaseTimes* phase_times() const { return _phase_times; }
633 633
634 634 // Check the current value of the young list RSet lengths and
635 635 // compare it against the last prediction. If the current value is
636 636 // higher, recalculate the young list target length prediction.
637 637 void revise_young_list_target_length_if_necessary();
638 638
639 639 // This should be called after the heap is resized.
640 640 void record_new_heap_size(uint new_number_of_regions);
641 641
642 642 void init();
643 643
644 644 // Create jstat counters for the policy.
645 645 virtual void initialize_gc_policy_counters();
646 646
647 647 virtual HeapWord* mem_allocate_work(size_t size,
648 648 bool is_tlab,
649 649 bool* gc_overhead_limit_was_exceeded);
650 650
651 651 // This method controls how a collector handles one or more
652 652 // of its generations being fully allocated.
653 653 virtual HeapWord* satisfy_failed_allocation(size_t size,
654 654 bool is_tlab);
655 655
656 656 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
657 657
658 658 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
659 659
660 660 bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
661 661
662 662 // Update the heuristic info to record a collection pause of the given
663 663 // start time, where the given number of bytes were used at the start.
664 664 // This may involve changing the desired size of a collection set.
665 665
666 666 void record_stop_world_start();
667 667
668 668 void record_collection_pause_start(double start_time_sec, size_t start_used);
669 669
670 670 // Must currently be called while the world is stopped.
671 671 void record_concurrent_mark_init_end(double
672 672 mark_init_elapsed_time_ms);
673 673
674 674 void record_concurrent_mark_remark_start();
675 675 void record_concurrent_mark_remark_end();
676 676
677 677 void record_concurrent_mark_cleanup_start();
678 678 void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
679 679 void record_concurrent_mark_cleanup_completed();
680 680
681 681 void record_concurrent_pause();
682 682
683 683 void record_collection_pause_end(double pause_time);
684 684 void print_heap_transition();
685 685 void print_detailed_heap_transition();
686 686
687 687 // Record the fact that a full collection occurred.
688 688 void record_full_collection_start();
689 689 void record_full_collection_end();
690 690
691 691 // Record how much space we copied during a GC. This is typically
692 692 // called when a GC alloc region is being retired.
693 693 void record_bytes_copied_during_gc(size_t bytes) {
694 694 _bytes_copied_during_gc += bytes;
695 695 }
696 696
697 697 // The amount of space we copied during a GC.
698 698 size_t bytes_copied_during_gc() {
699 699 return _bytes_copied_during_gc;
700 700 }
701 701
702 702 // Determine whether there are candidate regions so that the
703 703 // next GC should be mixed. The two action strings are used
704 704 // in the ergo output when the method returns true or false.
705 705 bool next_gc_should_be_mixed(const char* true_action_str,
706 706 const char* false_action_str);
707 707
708 708 // Choose a new collection set. Marks the chosen regions as being
709 709 // "in_collection_set", and links them together. The head and number of
710 710 // the collection set are available via access methods.
711 711 void finalize_cset(double target_pause_time_ms);
712 712
713 713 // The head of the list (via "next_in_collection_set()") representing the
714 714 // current collection set.
715 715 HeapRegion* collection_set() { return _collection_set; }
716 716
717 717 void clear_collection_set() { _collection_set = NULL; }
718 718
719 719 // Add old region "hr" to the CSet.
720 720 void add_old_region_to_cset(HeapRegion* hr);
721 721
722 722 // Incremental CSet Support
723 723
724 724 // The head of the incrementally built collection set.
725 725 HeapRegion* inc_cset_head() { return _inc_cset_head; }
726 726
727 727 // The tail of the incrementally built collection set.
728 728 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
729 729
730 730 // Initialize incremental collection set info.
731 731 void start_incremental_cset_building();
732 732
733 733 // Perform any final calculations on the incremental CSet fields
734 734 // before we can use them.
735 735 void finalize_incremental_cset_building();
736 736
737 737 void clear_incremental_cset() {
738 738 _inc_cset_head = NULL;
739 739 _inc_cset_tail = NULL;
740 740 }
741 741
742 742 // Stop adding regions to the incremental collection set
743 743 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
744 744
745 745 // Add information about hr to the aggregated information for the
746 746 // incrementally built collection set.
747 747 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
748 748
749 749 // Update information about hr in the aggregated information for
750 750 // the incrementally built collection set.
751 751 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
752 752
753 753 private:
754 754 // Update the incremental cset information when adding a region
755 755 // (should not be called directly).
756 756 void add_region_to_incremental_cset_common(HeapRegion* hr);
757 757
758 758 public:
759 759 // Add hr to the LHS of the incremental collection set.
760 760 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
761 761
762 762 // Add hr to the RHS of the incremental collection set.
763 763 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
764 764
765 765 #ifndef PRODUCT
766 766 void print_collection_set(HeapRegion* list_head, outputStream* st);
767 767 #endif // !PRODUCT
768 768
769 769 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
770 770 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
771 771 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
772 772
773 773 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
774 774 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
775 775 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
776 776
777 777 // This sets the initiate_conc_mark_if_possible() flag to start a
778 778 // new cycle, as long as we are not already in one. It's best if it
779 779 // is called during a safepoint when the test whether a cycle is in
780 780 // progress or not is stable.
781 781 bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
782 782
783 783 // This is called at the very beginning of an evacuation pause (it
784 784 // has to be the first thing that the pause does). If
785 785 // initiate_conc_mark_if_possible() is true, and the concurrent
786 786 // marking thread has completed its work during the previous cycle,
787 787 // it will set during_initial_mark_pause() to true so that the pause does
788 788 // the initial-mark work and start a marking cycle.
789 789 void decide_on_conc_mark_initiation();
790 790
791 791 // If an expansion would be appropriate, because recent GC overhead had
792 792 // exceeded the desired limit, return an amount to expand by.
793 793 size_t expansion_amount();
794 794
795 795 // Print tracing information.
796 796 void print_tracing_info() const;
797 797
798 798 // Print stats on young survival ratio
799 799 void print_yg_surv_rate_info() const;
800 800
801 801 void finished_recalculating_age_indexes(bool is_survivors) {
802 802 if (is_survivors) {
803 803 _survivor_surv_rate_group->finished_recalculating_age_indexes();
804 804 } else {
805 805 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
806 806 }
807 807 // do that for any other surv rate groups
808 808 }
809 809
810 810 bool is_young_list_full() {
811 811 uint young_list_length = _g1->young_list()->length();
812 812 uint young_list_target_length = _young_list_target_length;
813 813 return young_list_length >= young_list_target_length;
814 814 }
815 815
816 816 bool can_expand_young_list() {
817 817 uint young_list_length = _g1->young_list()->length();
818 818 uint young_list_max_length = _young_list_max_length;
819 819 return young_list_length < young_list_max_length;
820 820 }
821 821
822 822 uint young_list_max_length() {
823 823 return _young_list_max_length;
824 824 }
825 825
826 826 bool gcs_are_young() {
827 827 return _gcs_are_young;
828 828 }
829 829 void set_gcs_are_young(bool gcs_are_young) {
830 830 _gcs_are_young = gcs_are_young;
831 831 }
832 832
833 833 bool adaptive_young_list_length() {
834 834 return _young_gen_sizer->adaptive_young_list_length();
835 835 }
836 836
837 837 private:
838 838 //
839 839 // Survivor regions policy.
840 840 //
841 841
842 842 // Current tenuring threshold, set to 0 if the collector reaches the
843 843 // maximum number of survivor regions.
844 844 uint _tenuring_threshold;
845 845
846 846 // The limit on the number of regions allocated for survivors.
847 847 uint _max_survivor_regions;
848 848
849 849 // For reporting purposes.
850 850 size_t _eden_bytes_before_gc;
851 851 size_t _survivor_bytes_before_gc;
852 852 size_t _capacity_before_gc;
853 853
854 854 // The amount of survivor regions after a collection.
855 855 uint _recorded_survivor_regions;
856 856 // List of survivor regions.
857 857 HeapRegion* _recorded_survivor_head;
858 858 HeapRegion* _recorded_survivor_tail;
859 859
860 860 ageTable _survivors_age_table;
861 861
862 862 public:
863 863
864 864 inline GCAllocPurpose
865 865 evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
866 866 if (age < _tenuring_threshold && src_region->is_young()) {
867 867 return GCAllocForSurvived;
868 868 } else {
869 869 return GCAllocForTenured;
870 870 }
871 871 }
872 872
873 873 inline bool track_object_age(GCAllocPurpose purpose) {
874 874 return purpose == GCAllocForSurvived;
875 875 }
876 876
877 877 static const uint REGIONS_UNLIMITED = (uint) -1;
878 878
879 879 uint max_regions(int purpose);
880 880
881 881 // The limit on regions for a particular purpose is reached.
882 882 void note_alloc_region_limit_reached(int purpose) {
883 883 if (purpose == GCAllocForSurvived) {
884 884 _tenuring_threshold = 0;
885 885 }
886 886 }
887 887
888 888 void note_start_adding_survivor_regions() {
889 889 _survivor_surv_rate_group->start_adding_regions();
890 890 }
891 891
892 892 void note_stop_adding_survivor_regions() {
893 893 _survivor_surv_rate_group->stop_adding_regions();
894 894 }
895 895
896 896 void record_survivor_regions(uint regions,
897 897 HeapRegion* head,
898 898 HeapRegion* tail) {
899 899 _recorded_survivor_regions = regions;
900 900 _recorded_survivor_head = head;
901 901 _recorded_survivor_tail = tail;
902 902 }
903 903
904 904 uint recorded_survivor_regions() {
905 905 return _recorded_survivor_regions;
906 906 }
907 907
908 908 void record_thread_age_table(ageTable* age_table) {
909 909 _survivors_age_table.merge_par(age_table);
910 910 }
911 911
912 912 void update_max_gc_locker_expansion();
913 913
914 914 // Calculates survivor space parameters.
915 915 void update_survivors_policy();
916 916
917 917 };
918 918
919 919 // This should move to some place more general...
920 920
921 921 // If we have "n" measurements, and we've kept track of their "sum" and the
922 922 // "sum_of_squares" of the measurements, this returns the variance of the
923 923 // sequence.
924 924 inline double variance(int n, double sum_of_squares, double sum) {
925 925 double n_d = (double)n;
926 926 double avg = sum/n_d;
927 927 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
928 928 }
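As a quick check of the helper above: for the measurements 2, 4 and 6 we have n = 3, sum = 12, sum_of_squares = 56 and avg = 4, so variance(3, 56, 12) = (56 - 2*4*12 + 3*16) / 3 = 8 / 3 ≈ 2.67, which matches the usual form sum_of_squares/n - avg^2 = 56/3 - 16.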
929 929
930 930 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP