rev 6911 : 8065305: Make it possible to extend the G1CollectorPolicy
Summary: Added a G1CollectorPolicyExt class through which G1CollectorPolicy can be extended.
Reviewed-by: sjohanss, tschatzl
--- old/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+++ new/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
27 27
28 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
29 +#include "gc_implementation/g1/g1Allocator.hpp"
29 30 #include "gc_implementation/g1/g1MMUTracker.hpp"
30 31 #include "memory/collectorPolicy.hpp"
31 32
32 33 // A G1CollectorPolicy makes policy decisions that determine the
33 34 // characteristics of the collector. Examples include:
34 35 // * choice of collection set.
35 36 // * when to collect.
36 37
37 38 class HeapRegion;
38 39 class CollectionSetChooser;
39 40 class G1GCPhaseTimes;
40 41
41 42 // TraceGen0Time collects data on _both_ young and mixed evacuation pauses
42 43 // (the latter may contain non-young regions - i.e. regions that are
43 44 // technically in Gen1) while TraceGen1Time collects data about full GCs.
44 45 class TraceGen0TimeData : public CHeapObj<mtGC> {
45 46 private:
46 47 unsigned _young_pause_num;
47 48 unsigned _mixed_pause_num;
48 49
49 50 NumberSeq _all_stop_world_times_ms;
50 51 NumberSeq _all_yield_times_ms;
51 52
52 53 NumberSeq _total;
53 54 NumberSeq _other;
54 55 NumberSeq _root_region_scan_wait;
55 56 NumberSeq _parallel;
56 57 NumberSeq _ext_root_scan;
57 58 NumberSeq _satb_filtering;
58 59 NumberSeq _update_rs;
59 60 NumberSeq _scan_rs;
60 61 NumberSeq _obj_copy;
61 62 NumberSeq _termination;
62 63 NumberSeq _parallel_other;
63 64 NumberSeq _clear_ct;
64 65
65 66 void print_summary(const char* str, const NumberSeq* seq) const;
66 67 void print_summary_sd(const char* str, const NumberSeq* seq) const;
67 68
68 69 public:
69 70 TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
70 71 void record_start_collection(double time_to_stop_the_world_ms);
71 72 void record_yield_time(double yield_time_ms);
72 73 void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
73 74 void increment_young_collection_count();
74 75 void increment_mixed_collection_count();
75 76 void print() const;
76 77 };
77 78
78 79 class TraceGen1TimeData : public CHeapObj<mtGC> {
79 80 private:
80 81 NumberSeq _all_full_gc_times;
81 82
82 83 public:
83 84 void record_full_collection(double full_gc_time_ms);
84 85 void print() const;
85 86 };
86 87
87 88 // There are three command line options related to the young gen size:
88 89 // NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
89 90 // just a short form for NewSize==MaxNewSize). G1 will use its internal
90 91 // heuristics to calculate the actual young gen size, so these options
91 92 // basically only limit the range within which G1 can pick a young gen
92 93 // size. Also, these are general options taking byte sizes. G1 will
93 94 // internally work with a number of regions instead. So, some rounding
94 95 // will occur.
95 96 //
96 97 // If nothing related to the the young gen size is set on the command
97 98 // line we should allow the young gen to be between G1NewSizePercent
98 99 // and G1MaxNewSizePercent of the heap size. This means that every time
99 100 // the heap size changes, the limits for the young gen size will be
100 101 // recalculated.
101 102 //
102 103 // If only -XX:NewSize is set we should use the specified value as the
103 104 // minimum size for young gen. Still using G1MaxNewSizePercent of the
104 105 // heap as maximum.
105 106 //
106 107 // If only -XX:MaxNewSize is set we should use the specified value as the
107 108 // maximum size for young gen. Still using G1NewSizePercent of the heap
108 109 // as minimum.
109 110 //
110 111 // If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
111 112 // No updates when the heap size changes. There is a special case when
112 113 // NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
113 114 // different heuristic for calculating the collection set when we do mixed
114 115 // collection.
115 116 //
116 117 // If only -XX:NewRatio is set we should use the specified ratio of the heap
117 118 // as both min and max. This will be interpreted as "fixed" just like the
118 119 // NewSize==MaxNewSize case above. But we will update the min and max
119 120 // every time the heap size changes.
120 121 //
121 122 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
122 123 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
123 124 class G1YoungGenSizer : public CHeapObj<mtGC> {
124 125 private:
125 126 enum SizerKind {
126 127 SizerDefaults,
127 128 SizerNewSizeOnly,
128 129 SizerMaxNewSizeOnly,
129 130 SizerMaxAndNewSize,
130 131 SizerNewRatio
131 132 };
132 133 SizerKind _sizer_kind;
133 134 uint _min_desired_young_length;
134 135 uint _max_desired_young_length;
135 136 bool _adaptive_size;
136 137 uint calculate_default_min_length(uint new_number_of_heap_regions);
137 138 uint calculate_default_max_length(uint new_number_of_heap_regions);
138 139
139 140 // Update the given values for minimum and maximum young gen length in regions
140 141 // given the number of heap regions depending on the kind of sizing algorithm.
141 142 void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
142 143
143 144 public:
144 145 G1YoungGenSizer();
145 146 // Calculate the maximum length of the young gen given the number of regions
146 147 // depending on the sizing algorithm.
147 148 uint max_young_length(uint number_of_heap_regions);
148 149
149 150 void heap_size_changed(uint new_number_of_heap_regions);
150 151 uint min_desired_young_length() {
151 152 return _min_desired_young_length;
152 153 }
153 154 uint max_desired_young_length() {
154 155 return _max_desired_young_length;
155 156 }
156 157 bool adaptive_young_list_length() {
157 158 return _adaptive_size;
158 159 }
159 160 };
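Editorial note (not part of the webrev): the flag handling described in the long comment above boils down to recomputing the min/max bounds whenever the heap is resized. Below is a minimal standalone C++ sketch of that mapping; the constants are stand-ins for G1NewSizePercent / G1MaxNewSizePercent and the real recalculate_min_max_young_length() may differ in detail.

    enum SizerKind { SizerDefaults, SizerNewSizeOnly, SizerMaxNewSizeOnly,
                     SizerMaxAndNewSize, SizerNewRatio };

    // Stand-ins for the real G1NewSizePercent / G1MaxNewSizePercent flags.
    static const unsigned kNewSizePercent    = 5;
    static const unsigned kMaxNewSizePercent = 60;

    // On entry *min_len / *max_len hold the flag-derived values (NewSize /
    // MaxNewSize converted to regions); kinds that track the heap size
    // overwrite the corresponding bound.
    static void recalc_young_bounds(SizerKind kind, unsigned heap_regions,
                                    unsigned* min_len, unsigned* max_len) {
      switch (kind) {
        case SizerDefaults:            // both bounds follow the heap size
          *min_len = heap_regions * kNewSizePercent / 100;
          *max_len = heap_regions * kMaxNewSizePercent / 100;
          break;
        case SizerNewSizeOnly:         // NewSize pins the minimum
          *max_len = heap_regions * kMaxNewSizePercent / 100;
          break;
        case SizerMaxNewSizeOnly:      // MaxNewSize pins the maximum
          *min_len = heap_regions * kNewSizePercent / 100;
          break;
        case SizerMaxAndNewSize:       // both pinned: nothing to recompute
          break;
        case SizerNewRatio:            // young = heap / (NewRatio + 1)
          *min_len = heap_regions / 3; // assuming NewRatio == 2
          *max_len = *min_len;
          break;
      }
      if (*max_len < *min_len) *max_len = *min_len;
    }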
160 161
161 162 class G1CollectorPolicy: public CollectorPolicy {
162 163 private:
163 164 // either equal to the number of parallel threads, if ParallelGCThreads
164 165 // has been set, or 1 otherwise
165 166 int _parallel_gc_threads;
166 167
167 168 // The number of GC threads currently active.
168 169 uintx _no_of_gc_threads;
169 170
170 171 enum SomePrivateConstants {
171 172 NumPrevPausesForHeuristics = 10
172 173 };
173 174
174 175 G1MMUTracker* _mmu_tracker;
175 176
176 177 void initialize_alignments();
177 178 void initialize_flags();
178 179
179 180 CollectionSetChooser* _collectionSetChooser;
180 181
181 182 double _full_collection_start_sec;
182 183 uint _cur_collection_pause_used_regions_at_start;
183 184
184 185 // These exclude marking times.
185 186 TruncatedSeq* _recent_gc_times_ms;
186 187
187 188 TruncatedSeq* _concurrent_mark_remark_times_ms;
188 189 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
189 190
190 191 TraceGen0TimeData _trace_gen0_time_data;
191 192 TraceGen1TimeData _trace_gen1_time_data;
192 193
193 194 double _stop_world_start;
194 195
195 196 // indicates whether we are in young or mixed GC mode
196 197 bool _gcs_are_young;
197 198
198 199 uint _young_list_target_length;
199 200 uint _young_list_fixed_length;
200 201
201 202 // The max number of regions we can extend the eden by while the GC
202 203 // locker is active. This should be >= _young_list_target_length.
203 204 uint _young_list_max_length;
204 205
205 206 bool _last_gc_was_young;
206 207
207 208 bool _during_marking;
208 209 bool _in_marking_window;
209 210 bool _in_marking_window_im;
210 211
211 212 SurvRateGroup* _short_lived_surv_rate_group;
212 213 SurvRateGroup* _survivor_surv_rate_group;
213 214 // add here any more surv rate groups
214 215
215 216 double _gc_overhead_perc;
216 217
217 218 double _reserve_factor;
218 219 uint _reserve_regions;
219 220
220 221 bool during_marking() {
221 222 return _during_marking;
222 223 }
223 224
224 225 enum PredictionConstants {
225 226 TruncatedSeqLength = 10
226 227 };
227 228
228 229 TruncatedSeq* _alloc_rate_ms_seq;
229 230 double _prev_collection_pause_end_ms;
230 231
231 232 TruncatedSeq* _rs_length_diff_seq;
232 233 TruncatedSeq* _cost_per_card_ms_seq;
233 234 TruncatedSeq* _young_cards_per_entry_ratio_seq;
234 235 TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
235 236 TruncatedSeq* _cost_per_entry_ms_seq;
236 237 TruncatedSeq* _mixed_cost_per_entry_ms_seq;
237 238 TruncatedSeq* _cost_per_byte_ms_seq;
238 239 TruncatedSeq* _constant_other_time_ms_seq;
239 240 TruncatedSeq* _young_other_cost_per_region_ms_seq;
240 241 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
241 242
242 243 TruncatedSeq* _pending_cards_seq;
243 244 TruncatedSeq* _rs_lengths_seq;
244 245
245 246 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
246 247
247 248 G1YoungGenSizer* _young_gen_sizer;
248 249
249 250 uint _eden_cset_region_length;
250 251 uint _survivor_cset_region_length;
251 252 uint _old_cset_region_length;
252 253
253 254 void init_cset_region_lengths(uint eden_cset_region_length,
254 255 uint survivor_cset_region_length);
255 256
256 257 uint eden_cset_region_length() { return _eden_cset_region_length; }
257 258 uint survivor_cset_region_length() { return _survivor_cset_region_length; }
258 259 uint old_cset_region_length() { return _old_cset_region_length; }
259 260
260 261 uint _free_regions_at_end_of_collection;
261 262
262 263 size_t _recorded_rs_lengths;
263 264 size_t _max_rs_lengths;
264 265 double _sigma;
265 266
266 267 size_t _rs_lengths_prediction;
267 268
268 269 double sigma() { return _sigma; }
269 270
270 271 // A function that prevents us from putting too much stock in small sample
271 272 // sets. Returns a number between 2.0 and 1.0, depending on the number
272 273 // of samples. 5 or more samples yields one; fewer scales linearly from
273 274 // 2.0 at 1 sample to 1.0 at 5.
274 275 double confidence_factor(int samples) {
275 276 if (samples > 4) return 1.0;
276 277 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
277 278 }
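Worked example: with the default G1ConfidencePercent of 50 (so sigma() is 0.5), confidence_factor() returns 2.0 for one sample, 1.75 for two, 1.5 for three, 1.25 for four, and 1.0 from five samples on. Note that the "between 2.0 and 1.0" in the comment above assumes this default sigma; a larger sigma raises the small-sample end.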
278 279
279 280 double get_new_neg_prediction(TruncatedSeq* seq) {
280 281 return seq->davg() - sigma() * seq->dsd();
281 282 }
282 283
283 284 #ifndef PRODUCT
284 285 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
285 286 #endif // PRODUCT
286 287
287 288 void adjust_concurrent_refinement(double update_rs_time,
288 289 double update_rs_processed_buffers,
289 290 double goal_ms);
290 291
291 292 uintx no_of_gc_threads() { return _no_of_gc_threads; }
292 293 void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
293 294
294 295 double _pause_time_target_ms;
295 296
296 297 size_t _pending_cards;
297 298
298 299 public:
299 300 // Accessors
300 301
301 302 void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
302 303 hr->set_eden();
303 304 hr->install_surv_rate_group(_short_lived_surv_rate_group);
304 305 hr->set_young_index_in_cset(young_index_in_cset);
305 306 }
306 307
307 308 void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
308 309 assert(hr->is_survivor(), "pre-condition");
309 310 hr->install_surv_rate_group(_survivor_surv_rate_group);
310 311 hr->set_young_index_in_cset(young_index_in_cset);
311 312 }
312 313
313 314 #ifndef PRODUCT
314 315 bool verify_young_ages();
315 316 #endif // PRODUCT
316 317
317 318 double get_new_prediction(TruncatedSeq* seq) {
318 319 return MAX2(seq->davg() + sigma() * seq->dsd(),
319 320 seq->davg() * confidence_factor(seq->num()));
320 321 }
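Worked example of the predictor: for a sequence with three samples, a decaying average davg() of 10.0 ms, a decaying standard deviation dsd() of 2.0 ms, and sigma() of 0.5, the first term is 10.0 + 0.5 * 2.0 = 11.0 ms while the second is 10.0 * confidence_factor(3) = 10.0 * 1.5 = 15.0 ms, so get_new_prediction() returns 15.0 ms: with few samples the confidence padding dominates. get_new_neg_prediction() above is the symmetric lower bound, davg() - sigma() * dsd().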
321 322
322 323 void record_max_rs_lengths(size_t rs_lengths) {
323 324 _max_rs_lengths = rs_lengths;
324 325 }
325 326
326 327 size_t predict_rs_length_diff() {
327 328 return (size_t) get_new_prediction(_rs_length_diff_seq);
328 329 }
329 330
330 331 double predict_alloc_rate_ms() {
331 332 return get_new_prediction(_alloc_rate_ms_seq);
332 333 }
333 334
334 335 double predict_cost_per_card_ms() {
335 336 return get_new_prediction(_cost_per_card_ms_seq);
336 337 }
337 338
338 339 double predict_rs_update_time_ms(size_t pending_cards) {
339 340 return (double) pending_cards * predict_cost_per_card_ms();
340 341 }
341 342
342 343 double predict_young_cards_per_entry_ratio() {
343 344 return get_new_prediction(_young_cards_per_entry_ratio_seq);
344 345 }
345 346
346 347 double predict_mixed_cards_per_entry_ratio() {
347 348 if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
348 349 return predict_young_cards_per_entry_ratio();
349 350 } else {
350 351 return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
351 352 }
352 353 }
353 354
354 355 size_t predict_young_card_num(size_t rs_length) {
355 356 return (size_t) ((double) rs_length *
356 357 predict_young_cards_per_entry_ratio());
357 358 }
358 359
359 360 size_t predict_non_young_card_num(size_t rs_length) {
360 361 return (size_t) ((double) rs_length *
361 362 predict_mixed_cards_per_entry_ratio());
362 363 }
363 364
364 365 double predict_rs_scan_time_ms(size_t card_num) {
365 366 if (gcs_are_young()) {
366 367 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
367 368 } else {
368 369 return predict_mixed_rs_scan_time_ms(card_num);
369 370 }
370 371 }
371 372
372 373 double predict_mixed_rs_scan_time_ms(size_t card_num) {
373 374 if (_mixed_cost_per_entry_ms_seq->num() < 3) {
374 375 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
375 376 } else {
376 377 return (double) (card_num *
377 378 get_new_prediction(_mixed_cost_per_entry_ms_seq));
378 379 }
379 380 }
380 381
381 382 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
382 383 if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
383 384 return (1.1 * (double) bytes_to_copy) *
384 385 get_new_prediction(_cost_per_byte_ms_seq);
385 386 } else {
386 387 return (double) bytes_to_copy *
387 388 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
388 389 }
389 390 }
390 391
391 392 double predict_object_copy_time_ms(size_t bytes_to_copy) {
392 393 if (_in_marking_window && !_in_marking_window_im) {
393 394 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
394 395 } else {
395 396 return (double) bytes_to_copy *
396 397 get_new_prediction(_cost_per_byte_ms_seq);
397 398 }
398 399 }
399 400
400 401 double predict_constant_other_time_ms() {
401 402 return get_new_prediction(_constant_other_time_ms_seq);
402 403 }
403 404
404 405 double predict_young_other_time_ms(size_t young_num) {
405 406 return (double) young_num *
406 407 get_new_prediction(_young_other_cost_per_region_ms_seq);
407 408 }
408 409
409 410 double predict_non_young_other_time_ms(size_t non_young_num) {
410 411 return (double) non_young_num *
411 412 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
412 413 }
413 414
414 415 double predict_base_elapsed_time_ms(size_t pending_cards);
415 416 double predict_base_elapsed_time_ms(size_t pending_cards,
416 417 size_t scanned_cards);
417 418 size_t predict_bytes_to_copy(HeapRegion* hr);
418 419 double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);
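The overloads above are defined in g1CollectorPolicy.cpp. As a rough sketch of how the two-argument form composes the per-component predictors (an assumption about the .cpp, which this webrev does not show):

    // Sketch only: predicted fixed cost of a pause, before adding
    // per-region and per-byte terms for the chosen collection set.
    double predict_base_elapsed_time_ms(size_t pending_cards,
                                        size_t scanned_cards) {
      return predict_rs_update_time_ms(pending_cards)  // refine pending cards
           + predict_rs_scan_time_ms(scanned_cards)    // scan remembered sets
           + predict_constant_other_time_ms();         // fixed per-pause overhead
    }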
419 420
420 421 void set_recorded_rs_lengths(size_t rs_lengths);
421 422
422 423 uint cset_region_length() { return young_cset_region_length() +
423 424 old_cset_region_length(); }
424 425 uint young_cset_region_length() { return eden_cset_region_length() +
425 426 survivor_cset_region_length(); }
426 427
427 428 double predict_survivor_regions_evac_time();
428 429
429 430 void cset_regions_freed() {
430 431 bool propagate = _last_gc_was_young && !_in_marking_window;
431 432 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
432 433 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
433 434 // also call it on any more surv rate groups
434 435 }
435 436
436 437 G1MMUTracker* mmu_tracker() {
437 438 return _mmu_tracker;
438 439 }
439 440
440 441 double max_pause_time_ms() {
441 442 return _mmu_tracker->max_gc_time() * 1000.0;
442 443 }
443 444
444 445 double predict_remark_time_ms() {
445 446 return get_new_prediction(_concurrent_mark_remark_times_ms);
446 447 }
447 448
448 449 double predict_cleanup_time_ms() {
449 450 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
450 451 }
451 452
452 453 // Returns an estimate of the survival rate of the region at yg-age
453 454 // "age".
454 455 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
455 456 TruncatedSeq* seq = surv_rate_group->get_seq(age);
456 457 if (seq->num() == 0)
457 458 gclog_or_tty->print("BARF! age is %d", age);
458 459 guarantee( seq->num() > 0, "invariant" );
459 460 double pred = get_new_prediction(seq);
460 461 if (pred > 1.0)
461 462 pred = 1.0;
462 463 return pred;
463 464 }
464 465
465 466 double predict_yg_surv_rate(int age) {
466 467 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
467 468 }
468 469
469 470 double accum_yg_surv_rate_pred(int age) {
470 471 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
471 472 }
472 473
473 474 private:
474 475 // Statistics kept per GC stoppage, pause or full.
475 476 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
476 477
477 478 // Add a new GC of the given duration and end time to the record.
478 479 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
479 480
480 481 // The head of the list (via "next_in_collection_set()") representing the
481 482 // current collection set. Set from the incrementally built collection
482 483 // set at the start of the pause.
483 484 HeapRegion* _collection_set;
484 485
485 486 // The number of bytes in the collection set before the pause. Set from
486 487 // the incrementally built collection set at the start of an evacuation
487 488 // pause, and incremented in finalize_cset() when adding old regions
488 489 // (if any) to the collection set.
489 490 size_t _collection_set_bytes_used_before;
490 491
491 492 // The number of bytes copied during the GC.
492 493 size_t _bytes_copied_during_gc;
493 494
494 495 // The associated information that is maintained while the incremental
495 496 // collection set is being built with young regions. Used to populate
496 497 // the recorded info for the evacuation pause.
497 498
498 499 enum CSetBuildType {
499 500 Active, // We are actively building the collection set
500 501 Inactive // We are not actively building the collection set
501 502 };
502 503
503 504 CSetBuildType _inc_cset_build_state;
504 505
505 506 // The head of the incrementally built collection set.
506 507 HeapRegion* _inc_cset_head;
507 508
508 509 // The tail of the incrementally built collection set.
509 510 HeapRegion* _inc_cset_tail;
510 511
511 512 // The number of bytes in the incrementally built collection set.
512 513 // Used to set _collection_set_bytes_used_before at the start of
513 514 // an evacuation pause.
514 515 size_t _inc_cset_bytes_used_before;
515 516
516 517 // Used to record the highest end of any heap region in the collection set
517 518 HeapWord* _inc_cset_max_finger;
518 519
519 520 // The RSet lengths recorded for regions in the CSet. It is updated
520 521 // by the thread that adds a new region to the CSet. We assume that
521 522 // only one thread can be allocating a new CSet region (currently,
522 523 // it does so after taking the Heap_lock) hence no need to
523 524 // synchronize updates to this field.
524 525 size_t _inc_cset_recorded_rs_lengths;
525 526
526 527 // A concurrent refinement thread periodically samples the young
527 528 // region RSets and needs to update _inc_cset_recorded_rs_lengths as
528 529 // the RSets grow. Instead of having to synchronize updates to that
529 530 // field we accumulate them in this field and add it to
530 531 // _inc_cset_recorded_rs_lengths at the start of a GC.
531 532 ssize_t _inc_cset_recorded_rs_lengths_diffs;
532 533
533 534 // The predicted elapsed time it will take to collect the regions in
534 535 // the CSet. This is updated by the thread that adds a new region to
535 536 // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
536 537 // MT-safety assumptions.
537 538 double _inc_cset_predicted_elapsed_time_ms;
538 539
539 540 // See the comment for _inc_cset_recorded_rs_lengths_diffs.
540 541 double _inc_cset_predicted_elapsed_time_ms_diffs;
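A sketch of the fold these comments describe, as it would run at the start of a GC (an assumption about finalize_incremental_cset_building() in the .cpp, which this webrev does not show):

    // Sketch only: fold the unsynchronized deltas gathered by the
    // refinement thread into the main fields, then reset the deltas.
    // (The real code additionally guards against a negative rs_lengths
    // delta underflowing the size_t field.)
    _inc_cset_recorded_rs_lengths       += _inc_cset_recorded_rs_lengths_diffs;
    _inc_cset_predicted_elapsed_time_ms += _inc_cset_predicted_elapsed_time_ms_diffs;
    _inc_cset_recorded_rs_lengths_diffs       = 0;
    _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;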
541 542
542 543 // Stash a pointer to the g1 heap.
543 544 G1CollectedHeap* _g1;
544 545
545 546 G1GCPhaseTimes* _phase_times;
546 547
547 548 // The ratio of gc time to elapsed time, computed over recent pauses.
548 549 double _recent_avg_pause_time_ratio;
549 550
550 551 double recent_avg_pause_time_ratio() {
551 552 return _recent_avg_pause_time_ratio;
552 553 }
553 554
554 555 // At the end of a pause we check the heap occupancy and we decide
555 556 // whether we will start a marking cycle during the next pause. If
556 557 // we decide that we want to do that, we will set this parameter to
557 558 // true. So, this parameter will stay true between the end of a
558 559 // pause and the beginning of a subsequent pause (not necessarily
559 560 // the next one, see the comments on the next field) when we decide
560 561 // that we will indeed start a marking cycle and do the initial-mark
561 562 // work.
562 563 volatile bool _initiate_conc_mark_if_possible;
563 564
564 565 // If initiate_conc_mark_if_possible() is set at the beginning of a
565 566 // pause, it is a suggestion that the pause should start a marking
566 567 // cycle by doing the initial-mark work. However, it is possible
567 568 // that the concurrent marking thread is still finishing up the
568 569 // previous marking cycle (e.g., clearing the next marking
569 570 // bitmap). If that is the case we cannot start a new cycle and
570 571 // we'll have to wait for the concurrent marking thread to finish
571 572 // what it is doing. In this case we will postpone the marking cycle
572 573 // initiation decision for the next pause. When we eventually decide
573 574 // to start a cycle, we will set _during_initial_mark_pause which
574 575 // will stay true until the end of the initial-mark pause and it's
575 576 // the condition that indicates that a pause is doing the
576 577 // initial-mark work.
577 578 volatile bool _during_initial_mark_pause;
578 579
579 580 bool _last_young_gc;
580 581
581 582 // This set of variables tracks the collector efficiency, in order to
582 583 // determine whether we should initiate a new marking cycle.
583 584 double _cur_mark_stop_world_time_ms;
584 585 double _mark_remark_start_sec;
585 586 double _mark_cleanup_start_sec;
586 587
587 588 // Update the young list target length either by setting it to the
588 589 // desired fixed value or by calculating it using G1's pause
589 590 // prediction model. If no rs_lengths parameter is passed, predict
590 591 // the RS lengths using the prediction model, otherwise use the
591 592 // given rs_lengths as the prediction.
592 593 void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
593 594
594 595 // Calculate and return the minimum desired young list target
595 596 // length. This is the minimum desired young list length according
596 597 // to the user's inputs.
597 598 uint calculate_young_list_desired_min_length(uint base_min_length);
598 599
599 600 // Calculate and return the maximum desired young list target
600 601 // length. This is the maximum desired young list length according
601 602 // to the user's inputs.
602 603 uint calculate_young_list_desired_max_length();
603 604
604 605 // Calculate and return the maximum young list target length that
605 606 // can fit into the pause time goal. The parameters are: rs_lengths
606 607 // represent the prediction of how large the young RSet lengths will
607 608 // be, base_min_length is the already existing number of regions in
608 609 // the young list, min_length and max_length are the desired min and
609 610 // max young list length according to the user's inputs.
610 611 uint calculate_young_list_target_length(size_t rs_lengths,
611 612 uint base_min_length,
612 613 uint desired_min_length,
613 614 uint desired_max_length);
614 615
615 616 // Check whether a given young length (young_length) fits into the
616 617 // given target pause time and whether the prediction for the amount
617 618 // of objects to be copied for the given length will fit into the
618 619 // given free space (expressed by base_free_regions). It is used by
619 620 // calculate_young_list_target_length().
620 621 bool predict_will_fit(uint young_length, double base_time_ms,
621 622 uint base_free_regions, double target_pause_time_ms);
622 623
623 624 // Calculate the minimum number of old regions we'll add to the CSet
624 625 // during a mixed GC.
625 626 uint calc_min_old_cset_length();
626 627
627 628 // Calculate the maximum number of old regions we'll add to the CSet
628 629 // during a mixed GC.
629 630 uint calc_max_old_cset_length();
630 631
631 632 // Returns the given amount of uncollected reclaimable space
632 633 // as a percentage of the current heap capacity.
633 634 double reclaimable_bytes_perc(size_t reclaimable_bytes);
634 635
635 636 public:
636 637
637 638 G1CollectorPolicy();
638 639
639 640 virtual G1CollectorPolicy* as_g1_policy() { return this; }
640 641
641 642 virtual CollectorPolicy::Name kind() {
642 643 return CollectorPolicy::G1CollectorPolicyKind;
643 644 }
644 645
645 646 G1GCPhaseTimes* phase_times() const { return _phase_times; }
646 647
647 648 // Check the current value of the young list RSet lengths and
648 649 // compare it against the last prediction. If the current value is
649 650 // higher, recalculate the young list target length prediction.
650 651 void revise_young_list_target_length_if_necessary();
651 652
652 653 // This should be called after the heap is resized.
653 654 void record_new_heap_size(uint new_number_of_regions);
654 655
655 656 void init();
656 657
657 658 // Create jstat counters for the policy.
658 659 virtual void initialize_gc_policy_counters();
659 660
660 661 virtual HeapWord* mem_allocate_work(size_t size,
661 662 bool is_tlab,
662 663 bool* gc_overhead_limit_was_exceeded);
663 664
664 665 // This method controls how a collector handles one or more
665 666 // of its generations being fully allocated.
666 667 virtual HeapWord* satisfy_failed_allocation(size_t size,
667 668 bool is_tlab);
668 669
669 670 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
670 671
671 672 bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
672 673
673 674 // Record the start and end of an evacuation pause.
674 675 void record_collection_pause_start(double start_time_sec);
675 676 void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
676 677
677 678 // Record the start and end of a full collection.
678 679 void record_full_collection_start();
679 680 void record_full_collection_end();
680 681
681 682 // Must currently be called while the world is stopped.
682 683 void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
683 684
684 685 // Record start and end of remark.
685 686 void record_concurrent_mark_remark_start();
686 687 void record_concurrent_mark_remark_end();
687 688
688 689 // Record start, end, and completion of cleanup.
689 690 void record_concurrent_mark_cleanup_start();
690 691 void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
691 692 void record_concurrent_mark_cleanup_completed();
692 693
693 694 // Records the information about the heap size for reporting in
694 695 // print_detailed_heap_transition
695 696 void record_heap_size_info_at_start(bool full);
696 697
697 698 // Print heap sizing transition (with less and more detail).
698 699 void print_heap_transition();
699 700 void print_detailed_heap_transition(bool full = false);
700 701
701 702 void record_stop_world_start();
702 703 void record_concurrent_pause();
703 704
704 705 // Record how much space we copied during a GC. This is typically
705 706 // called when a GC alloc region is being retired.
706 707 void record_bytes_copied_during_gc(size_t bytes) {
707 708 _bytes_copied_during_gc += bytes;
708 709 }
709 710
710 711 // The amount of space we copied during a GC.
711 712 size_t bytes_copied_during_gc() {
712 713 return _bytes_copied_during_gc;
713 714 }
714 715
715 716 // Determine whether there are candidate regions so that the
716 717 // next GC should be mixed. The two action strings are used
717 718 // in the ergo output when the method returns true or false.
718 719 bool next_gc_should_be_mixed(const char* true_action_str,
719 720 const char* false_action_str);
720 721
721 722 // Choose a new collection set. Marks the chosen regions as being
722 723 // "in_collection_set", and links them together. The head and number of
723 724 // the collection set are available via access methods.
724 725 void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
725 726
726 727 // The head of the list (via "next_in_collection_set()") representing the
727 728 // current collection set.
728 729 HeapRegion* collection_set() { return _collection_set; }
729 730
730 731 void clear_collection_set() { _collection_set = NULL; }
731 732
732 733 // Add old region "hr" to the CSet.
733 734 void add_old_region_to_cset(HeapRegion* hr);
734 735
735 736 // Incremental CSet Support
736 737
737 738 // The head of the incrementally built collection set.
738 739 HeapRegion* inc_cset_head() { return _inc_cset_head; }
739 740
740 741 // The tail of the incrementally built collection set.
741 742 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
742 743
743 744 // Initialize incremental collection set info.
744 745 void start_incremental_cset_building();
745 746
746 747 // Perform any final calculations on the incremental CSet fields
747 748 // before we can use them.
748 749 void finalize_incremental_cset_building();
749 750
750 751 void clear_incremental_cset() {
751 752 _inc_cset_head = NULL;
752 753 _inc_cset_tail = NULL;
753 754 }
754 755
755 756 // Stop adding regions to the incremental collection set
756 757 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
757 758
758 759 // Add information about hr to the aggregated information for the
759 760 // incrementally built collection set.
760 761 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
761 762
762 763 // Update information about hr in the aggregated information for
763 764 // the incrementally built collection set.
764 765 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
765 766
766 767 private:
767 768 // Update the incremental cset information when adding a region
768 769 // (should not be called directly).
769 770 void add_region_to_incremental_cset_common(HeapRegion* hr);
770 771
771 772 public:
772 773 // Add hr to the LHS of the incremental collection set.
773 774 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
774 775
775 776 // Add hr to the RHS of the incremental collection set.
776 777 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
777 778
778 779 #ifndef PRODUCT
779 780 void print_collection_set(HeapRegion* list_head, outputStream* st);
780 781 #endif // !PRODUCT
781 782
782 783 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
783 784 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
784 785 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
785 786
786 787 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
787 788 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
788 789 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
789 790
790 791 // This sets the initiate_conc_mark_if_possible() flag to start a
791 792 // new cycle, as long as we are not already in one. It's best if it
792 793 // is called during a safepoint when the test whether a cycle is in
793 794 // progress or not is stable.
794 795 bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
795 796
796 797 // This is called at the very beginning of an evacuation pause (it
797 798 // has to be the first thing that the pause does). If
798 799 // initiate_conc_mark_if_possible() is true, and the concurrent
799 800 // marking thread has completed its work during the previous cycle,
800 801 // it will set during_initial_mark_pause() to true so that the pause does
801 802 // the initial-mark work and start a marking cycle.
802 803 void decide_on_conc_mark_initiation();
803 804
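A heavily simplified sketch of that decision (the real logic in g1CollectorPolicy.cpp handles more cases, and concurrent_mark_is_finishing() here is a hypothetical stand-in for the actual test):

    // Sketch only: run at the very start of an evacuation pause.
    void decide_on_conc_mark_initiation_sketch() {
      if (initiate_conc_mark_if_possible()) {
        if (!concurrent_mark_is_finishing()) {   // hypothetical predicate
          // Start a cycle now: this pause performs the initial-mark work.
          set_during_initial_mark_pause();
          clear_initiate_conc_mark_if_possible();
        }
        // Otherwise keep the flag set and re-evaluate at the next pause.
      }
    }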
804 805 // If an expansion would be appropriate, because recent GC overhead had
805 806 // exceeded the desired limit, return an amount to expand by.
806 - size_t expansion_amount();
807 + virtual size_t expansion_amount();
807 808
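Making expansion_amount() virtual is what lets a subclass override the expansion policy; this is the hook the new G1CollectorPolicyExt (see the changeset summary) builds on. A hypothetical override, for illustration only (the actual G1CollectorPolicyExt added by this change is not part of this file):

    // Hypothetical subclass demonstrating the extension point.
    class G1CollectorPolicyExt : public G1CollectorPolicy {
     public:
      virtual size_t expansion_amount() {
        // For example, double the base policy's suggested expansion.
        return 2 * G1CollectorPolicy::expansion_amount();
      }
    };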
808 809 // Print tracing information.
809 810 void print_tracing_info() const;
810 811
811 812 // Print stats on young survival ratio
812 813 void print_yg_surv_rate_info() const;
813 814
814 815 void finished_recalculating_age_indexes(bool is_survivors) {
815 816 if (is_survivors) {
816 817 _survivor_surv_rate_group->finished_recalculating_age_indexes();
817 818 } else {
818 819 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
819 820 }
820 821 // do that for any other surv rate groups
821 822 }
822 823
823 824 size_t young_list_target_length() const { return _young_list_target_length; }
824 825
825 - bool is_young_list_full() {
826 - uint young_list_length = _g1->young_list()->length();
827 - uint young_list_target_length = _young_list_target_length;
828 - return young_list_length >= young_list_target_length;
829 - }
826 + bool is_young_list_full();
830 827
831 - bool can_expand_young_list() {
832 - uint young_list_length = _g1->young_list()->length();
833 - uint young_list_max_length = _young_list_max_length;
834 - return young_list_length < young_list_max_length;
835 - }
828 + bool can_expand_young_list();
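The two inline bodies removed above presumably reappear out of line (in g1CollectorPolicy.cpp or an .inline.hpp; the destination file is not shown in this webrev), with the same logic:

    bool G1CollectorPolicy::is_young_list_full() {
      uint young_list_length = _g1->young_list()->length();
      uint young_list_target_length = _young_list_target_length;
      return young_list_length >= young_list_target_length;
    }

    bool G1CollectorPolicy::can_expand_young_list() {
      uint young_list_length = _g1->young_list()->length();
      uint young_list_max_length = _young_list_max_length;
      return young_list_length < young_list_max_length;
    }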
836 829
837 830 uint young_list_max_length() {
838 831 return _young_list_max_length;
839 832 }
840 833
841 834 bool gcs_are_young() {
842 835 return _gcs_are_young;
843 836 }
844 837 void set_gcs_are_young(bool gcs_are_young) {
845 838 _gcs_are_young = gcs_are_young;
846 839 }
847 840
848 841 bool adaptive_young_list_length() {
849 842 return _young_gen_sizer->adaptive_young_list_length();
850 843 }
851 844
852 845 private:
853 846 //
854 847 // Survivor regions policy.
855 848 //
856 849
857 850 // Current tenuring threshold, set to 0 if the collector reaches the
858 851 // maximum number of survivor regions.
859 852 uint _tenuring_threshold;
860 853
861 854 // The limit on the number of regions allocated for survivors.
862 855 uint _max_survivor_regions;
863 856
864 857 // For reporting purposes.
865 858 // The value of _heap_used_bytes_before_gc is also used to calculate
866 859 // the cost of copying.
867 860
868 861 size_t _eden_used_bytes_before_gc; // Eden occupancy before GC
869 862 size_t _survivor_used_bytes_before_gc; // Survivor occupancy before GC
870 863 size_t _heap_used_bytes_before_gc; // Heap occupancy before GC
871 864 size_t _metaspace_used_bytes_before_gc; // Metaspace occupancy before GC
872 865
873 866 size_t _eden_capacity_bytes_before_gc; // Eden capacity before GC
874 867 size_t _heap_capacity_bytes_before_gc; // Heap capacity before GC
875 868
876 869 // The amount of survivor regions after a collection.
877 870 uint _recorded_survivor_regions;
878 871 // List of survivor regions.
879 872 HeapRegion* _recorded_survivor_head;
880 873 HeapRegion* _recorded_survivor_tail;
881 874
882 875 ageTable _survivors_age_table;
883 876
884 877 public:
885 878 uint tenuring_threshold() const { return _tenuring_threshold; }
886 879
887 880 inline GCAllocPurpose
888 881 evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
889 882 if (age < _tenuring_threshold && src_region->is_young()) {
890 883 return GCAllocForSurvived;
891 884 } else {
892 885 return GCAllocForTenured;
893 886 }
894 887 }
895 888
896 889 inline bool track_object_age(GCAllocPurpose purpose) {
897 890 return purpose == GCAllocForSurvived;
898 891 }
899 892
900 893 static const uint REGIONS_UNLIMITED = (uint) -1;
901 894
902 895 uint max_regions(int purpose);
903 896
904 897 // The limit on regions for a particular purpose is reached.
905 898 void note_alloc_region_limit_reached(int purpose) {
906 899 if (purpose == GCAllocForSurvived) {
907 900 _tenuring_threshold = 0;
908 901 }
909 902 }
910 903
911 904 void note_start_adding_survivor_regions() {
912 905 _survivor_surv_rate_group->start_adding_regions();
913 906 }
914 907
915 908 void note_stop_adding_survivor_regions() {
916 909 _survivor_surv_rate_group->stop_adding_regions();
917 910 }
918 911
919 912 void record_survivor_regions(uint regions,
920 913 HeapRegion* head,
921 914 HeapRegion* tail) {
922 915 _recorded_survivor_regions = regions;
923 916 _recorded_survivor_head = head;
924 917 _recorded_survivor_tail = tail;
925 918 }
926 919
927 920 uint recorded_survivor_regions() {
928 921 return _recorded_survivor_regions;
929 922 }
930 923
931 924 void record_thread_age_table(ageTable* age_table) {
932 925 _survivors_age_table.merge_par(age_table);
933 926 }
934 927
935 928 void update_max_gc_locker_expansion();
936 929
937 930 // Calculates survivor space parameters.
938 931 void update_survivors_policy();
939 932
940 933 virtual void post_heap_initialize();
941 934 };
942 935
943 936 // This should move to some place more general...
944 937
945 938 // If we have "n" measurements, and we've kept track of their "sum" and the
946 939 // "sum_of_squares" of the measurements, this returns the variance of the
947 940 // sequence.
948 941 inline double variance(int n, double sum_of_squares, double sum) {
949 942 double n_d = (double)n;
950 943 double avg = sum/n_d;
951 944 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
952 945 }
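For reference, since sum == n_d * avg, this is the standard shortcut form of the (population) variance:

      (sum_of_squares - 2*avg*sum + n*avg^2) / n
    = sum_of_squares/n - 2*avg^2 + avg^2
    = sum_of_squares/n - avg^2

i.e. the mean of the squares minus the square of the mean.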
953 946
954 947 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP