rev 9277 : imported patch 8140597-forcing-initial-mark-causes-abort-mixed-collections
rev 9278 : imported patch 8139874-after-full-gc-next-gc-is-always-young-only
rev 9279 : imported patch 8138740-start-initial-mark-right-after-mixed-gc-if-needed
rev 9281 : [mq]: 8140689-skip-last-young-if-nothing-to-do-in-mixed
1 /*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentG1Refine.hpp"
27 #include "gc/g1/concurrentMark.hpp"
28 #include "gc/g1/concurrentMarkThread.inline.hpp"
29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 #include "gc/g1/g1CollectorPolicy.hpp"
31 #include "gc/g1/g1ErgoVerbose.hpp"
32 #include "gc/g1/g1GCPhaseTimes.hpp"
33 #include "gc/g1/g1Log.hpp"
34 #include "gc/g1/heapRegion.inline.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "gc/shared/gcPolicyCounters.hpp"
37 #include "runtime/arguments.hpp"
38 #include "runtime/java.hpp"
39 #include "runtime/mutexLocker.hpp"
40 #include "utilities/debug.hpp"
41
42 // Different defaults for different numbers of GC threads
43 // They were chosen by running GCOld and SPECjbb on debris with different
44 // numbers of GC threads and choosing them based on the results
45
46 // all the same
47 static double rs_length_diff_defaults[] = {
48 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
49 };
50
51 static double cost_per_card_ms_defaults[] = {
52 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
53 };
54
55 // all the same
56 static double young_cards_per_entry_ratio_defaults[] = {
57 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
58 };
59
60 static double cost_per_entry_ms_defaults[] = {
61 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
62 };
63
64 static double cost_per_byte_ms_defaults[] = {
65 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
66 };
67
68 // these should be pretty consistent
69 static double constant_other_time_ms_defaults[] = {
70 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
71 };
72
73
74 static double young_other_cost_per_region_ms_defaults[] = {
75 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
76 };
77
78 static double non_young_other_cost_per_region_ms_defaults[] = {
79 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
80 };
81
82 G1CollectorPolicy::G1CollectorPolicy() :
83 _predictor(G1ConfidencePercent / 100.0),
84 _parallel_gc_threads(ParallelGCThreads),
85
86 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
87 _stop_world_start(0.0),
88
89 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
90 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
91
92 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
93 _prev_collection_pause_end_ms(0.0),
94 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
95 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
96 _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
97 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
98 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
99 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
100 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
105 _non_young_other_cost_per_region_ms_seq(
106 new TruncatedSeq(TruncatedSeqLength)),
107
108 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
109 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
110
111 _pause_time_target_ms((double) MaxGCPauseMillis),
112
113 _recent_prev_end_times_for_all_gcs_sec(
114 new TruncatedSeq(NumPrevPausesForHeuristics)),
115
116 _recent_avg_pause_time_ratio(0.0),
117 _rs_lengths_prediction(0),
118 _max_survivor_regions(0),
119
120 _eden_used_bytes_before_gc(0),
121 _survivor_used_bytes_before_gc(0),
122 _heap_used_bytes_before_gc(0),
123 _metaspace_used_bytes_before_gc(0),
124 _eden_capacity_bytes_before_gc(0),
125 _heap_capacity_bytes_before_gc(0),
126
127 _eden_cset_region_length(0),
128 _survivor_cset_region_length(0),
129 _old_cset_region_length(0),
130
131 _collection_set(NULL),
132 _collection_set_bytes_used_before(0),
133
134 // Incremental CSet attributes
135 _inc_cset_build_state(Inactive),
136 _inc_cset_head(NULL),
137 _inc_cset_tail(NULL),
138 _inc_cset_bytes_used_before(0),
139 _inc_cset_max_finger(NULL),
140 _inc_cset_recorded_rs_lengths(0),
141 _inc_cset_recorded_rs_lengths_diffs(0),
142 _inc_cset_predicted_elapsed_time_ms(0.0),
143 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
144
145 // add here any more surv rate groups
146 _recorded_survivor_regions(0),
147 _recorded_survivor_head(NULL),
148 _recorded_survivor_tail(NULL),
149 _survivors_age_table(true),
150
151 _gc_overhead_perc(0.0) {
152
153 // SurvRateGroups below must be initialized after the predictor because they
154 // indirectly use it through this object passed to their constructor.
155 _short_lived_surv_rate_group =
156 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
157 _survivor_surv_rate_group =
158 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
159
160 // Set up the region size and associated fields. Given that the
161 // policy is created before the heap, we have to set this up here,
162 // so it's done as soon as possible.
163
164 // It would have been natural to pass initial_heap_byte_size() and
165 // max_heap_byte_size() to setup_heap_region_size() but those have
166 // not been set up at this point since they should be aligned with
167 // the region size. So, there is a circular dependency here. We base
168 // the region size on the heap size, but the heap size should be
169 // aligned with the region size. To get around this we use the
170 // unaligned values for the heap.
171 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
172 HeapRegionRemSet::setup_remset_size();
173
174 G1ErgoVerbose::initialize();
175 if (PrintAdaptiveSizePolicy) {
176 // Currently, we only use a single switch for all the heuristics.
177 G1ErgoVerbose::set_enabled(true);
178 // Given that we don't currently have a verbosity level
179 // parameter, we'll hardcode this to high. This can be easily
180 // changed in the future.
181 G1ErgoVerbose::set_level(ErgoHigh);
182 } else {
183 G1ErgoVerbose::set_enabled(false);
184 }
185
186 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
187 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
188
189 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
190
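  // The per-thread cost default arrays above each have 8 entries, one per GC
  // thread count from 1 to 8; clamp the index so larger thread counts reuse
  // the 8-thread values.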
191 int index = MIN2(_parallel_gc_threads - 1, 7);
192
193 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
194 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
195 _cost_scan_hcc_seq->add(0.0);
196 _young_cards_per_entry_ratio_seq->add(
197 young_cards_per_entry_ratio_defaults[index]);
198 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
199 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
200 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
201 _young_other_cost_per_region_ms_seq->add(
202 young_other_cost_per_region_ms_defaults[index]);
203 _non_young_other_cost_per_region_ms_seq->add(
204 non_young_other_cost_per_region_ms_defaults[index]);
205
206 // Below, we might need to calculate the pause time target based on
207 // the pause interval. When we do so we are going to give G1 maximum
208 // flexibility and allow it to do pauses when it needs to. So, we'll
209 // arrange for the pause interval to be the pause time target + 1 to
210 // ensure that a) the pause time target is maximized with respect to
211 // the pause interval and b) we maintain the invariant that pause
212 // time target < pause interval. If the user does not want this
213 // maximum flexibility, they will have to set the pause interval
214 // explicitly.
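  // For example, with the default MaxGCPauseMillis of 200 and no
  // user-specified interval, the code below sets GCPauseIntervalMillis to
  // 201, i.e. one 200ms pause is allowed per 201ms time slice.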
215
216 // First make sure that, if either parameter is set, its value is
217 // reasonable.
218 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
219 if (MaxGCPauseMillis < 1) {
220 vm_exit_during_initialization("MaxGCPauseMillis should be "
221 "greater than 0");
222 }
223 }
224 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
225 if (GCPauseIntervalMillis < 1) {
226 vm_exit_during_initialization("GCPauseIntervalMillis should be "
227 "greater than 0");
228 }
229 }
230
231 // Then, if the pause time target parameter was not set, set it to
232 // the default value.
233 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
234 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
235 // The default pause time target in G1 is 200ms
236 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
237 } else {
238 // We do not allow the pause interval to be set without the
239 // pause time target
240 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
241 "without setting MaxGCPauseMillis");
242 }
243 }
244
245 // Then, if the interval parameter was not set, set it according to
246 // the pause time target (this will also deal with the case when the
247 // pause time target is the default value).
248 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
249 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
250 }
251
252 // Finally, make sure that the two parameters are consistent.
253 if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
254 char buffer[256];
255 jio_snprintf(buffer, 256,
256 "MaxGCPauseMillis (%u) should be less than "
257 "GCPauseIntervalMillis (%u)",
258 MaxGCPauseMillis, GCPauseIntervalMillis);
259 vm_exit_during_initialization(buffer);
260 }
261
262 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
263 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
264 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
265
266 // start conservatively (around 50ms is about right)
267 _concurrent_mark_remark_times_ms->add(0.05);
268 _concurrent_mark_cleanup_times_ms->add(0.20);
269 _tenuring_threshold = MaxTenuringThreshold;
270
271 assert(GCTimeRatio > 0,
272 "we should have set it to a default value set_g1_gc_flags() "
273 "if a user set it to 0");
274 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
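  // E.g. GCTimeRatio == 9 gives 100 * (1 / (1 + 9)) = 10, i.e. a target of
  // at most ~10% of total time spent in GC.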
275
276 uintx reserve_perc = G1ReservePercent;
277 // Put an artificial ceiling on this so that it's not set to a silly value.
278 if (reserve_perc > 50) {
279 reserve_perc = 50;
280 warning("G1ReservePercent is set to a value that is too large, "
281 "it's been updated to " UINTX_FORMAT, reserve_perc);
282 }
283 _reserve_factor = (double) reserve_perc / 100.0;
284 // This will be set when the heap is expanded
285 // for the first time during initialization.
286 _reserve_regions = 0;
287
288 _collectionSetChooser = new CollectionSetChooser();
289 }
290
291 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
292 return _predictor.get_new_prediction(seq);
293 }
294
295 void G1CollectorPolicy::initialize_alignments() {
296 _space_alignment = HeapRegion::GrainBytes;
297 size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
298 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
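  // The heap alignment has to satisfy all three constraints at once, so take
  // the largest of the card table constraint, the region size and the page size.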
299 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
300 }
301
302 void G1CollectorPolicy::initialize_flags() {
303 if (G1HeapRegionSize != HeapRegion::GrainBytes) {
304 FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
305 }
306
307 if (SurvivorRatio < 1) {
308 vm_exit_during_initialization("Invalid survivor ratio specified");
309 }
310 CollectorPolicy::initialize_flags();
311 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
312 }
313
314 void G1CollectorPolicy::post_heap_initialize() {
315 uintx max_regions = G1CollectedHeap::heap()->max_regions();
316 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
317 if (max_young_size != MaxNewSize) {
318 FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
319 }
320 }
321
322 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
323
324 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
325 _min_desired_young_length(0), _max_desired_young_length(0) {
326 if (FLAG_IS_CMDLINE(NewRatio)) {
327 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
328 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
329 } else {
330 _sizer_kind = SizerNewRatio;
331 _adaptive_size = false;
332 return;
333 }
334 }
335
336 if (NewSize > MaxNewSize) {
337 if (FLAG_IS_CMDLINE(MaxNewSize)) {
338 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
339 "A new max generation size of " SIZE_FORMAT "k will be used.",
340 NewSize/K, MaxNewSize/K, NewSize/K);
341 }
342 MaxNewSize = NewSize;
343 }
344
345 if (FLAG_IS_CMDLINE(NewSize)) {
346 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
347 1U);
348 if (FLAG_IS_CMDLINE(MaxNewSize)) {
349 _max_desired_young_length =
350 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
351 1U);
352 _sizer_kind = SizerMaxAndNewSize;
353 _adaptive_size = _min_desired_young_length == _max_desired_young_length;
354 } else {
355 _sizer_kind = SizerNewSizeOnly;
356 }
357 } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
358 _max_desired_young_length =
359 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
360 1U);
361 _sizer_kind = SizerMaxNewSizeOnly;
362 }
363 }
364
365 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
366 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
367 return MAX2(1U, default_value);
368 }
369
370 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
371 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
372 return MAX2(1U, default_value);
373 }
374
375 void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
376 assert(number_of_heap_regions > 0, "Heap must be initialized");
377
378 switch (_sizer_kind) {
379 case SizerDefaults:
380 *min_young_length = calculate_default_min_length(number_of_heap_regions);
381 *max_young_length = calculate_default_max_length(number_of_heap_regions);
382 break;
383 case SizerNewSizeOnly:
384 *max_young_length = calculate_default_max_length(number_of_heap_regions);
385 *max_young_length = MAX2(*min_young_length, *max_young_length);
386 break;
387 case SizerMaxNewSizeOnly:
388 *min_young_length = calculate_default_min_length(number_of_heap_regions);
389 *min_young_length = MIN2(*min_young_length, *max_young_length);
390 break;
391 case SizerMaxAndNewSize:
392 // Do nothing. Values were set on the command line; don't update them at runtime.
393 break;
394 case SizerNewRatio:
395 *min_young_length = number_of_heap_regions / (NewRatio + 1);
396 *max_young_length = *min_young_length;
397 break;
398 default:
399 ShouldNotReachHere();
400 }
401
402 assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
403 }
404
405 uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
406 // We need to pass the desired values because recalculation may not update these
407 // values in some cases.
408 uint temp = _min_desired_young_length;
409 uint result = _max_desired_young_length;
410 recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
411 return result;
412 }
413
414 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
415 recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
416 &_max_desired_young_length);
417 }
418
419 void G1CollectorPolicy::init() {
420 // Set aside an initial future to_space.
421 _g1 = G1CollectedHeap::heap();
422
423 assert(Heap_lock->owned_by_self(), "Locking discipline.");
424
425 initialize_gc_policy_counters();
426
427 if (adaptive_young_list_length()) {
428 _young_list_fixed_length = 0;
429 } else {
430 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
431 }
432 _free_regions_at_end_of_collection = _g1->num_free_regions();
433
434 update_young_list_max_and_target_length();
435 // We may immediately start allocating regions and placing them on the
436 // collection set list. Initialize the per-collection set info
437 start_incremental_cset_building();
438 }
439
440 void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
441 phase_times()->note_gc_start(num_active_workers);
442 }
443
444 // Create the jstat counters for the policy.
445 void G1CollectorPolicy::initialize_gc_policy_counters() {
446 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
447 }
448
449 bool G1CollectorPolicy::predict_will_fit(uint young_length,
450 double base_time_ms,
451 uint base_free_regions,
452 double target_pause_time_ms) const {
453 if (young_length >= base_free_regions) {
454 // end condition 1: not enough space for the young regions
455 return false;
456 }
457
458 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
459 size_t bytes_to_copy =
460 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
461 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
462 double young_other_time_ms = predict_young_other_time_ms(young_length);
463 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
464 if (pause_time_ms > target_pause_time_ms) {
465 // end condition 2: prediction is over the target pause time
466 return false;
467 }
468
469 size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
470 if ((2.0 /* magic */ * _predictor.sigma()) * bytes_to_copy > free_bytes) {
471 // end condition 3: out-of-space (conservatively!)
472 return false;
473 }
474
475 // success!
476 return true;
477 }
478
479 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
480 // re-calculate the necessary reserve
481 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
482 // We use ceiling so that if reserve_regions_d is > 0.0 (but
483 // smaller than 1.0) we'll get 1.
484 _reserve_regions = (uint) ceil(reserve_regions_d);
485
486 _young_gen_sizer->heap_size_changed(new_number_of_regions);
487 }
488
489 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
490 uint base_min_length) const {
491 uint desired_min_length = 0;
492 if (adaptive_young_list_length()) {
493 if (_alloc_rate_ms_seq->num() > 3) {
494 double now_sec = os::elapsedTime();
495 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
496 double alloc_rate_ms = predict_alloc_rate_ms();
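      // The predicted allocation rate is in regions per ms, so this is the
      // number of eden regions the application is expected to allocate by the
      // time the MMU tracker next allows a maximum-length GC pause.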
497 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
498 } else {
499 // otherwise we don't have enough info to make the prediction
500 }
501 }
502 desired_min_length += base_min_length;
503 // make sure we don't go below any user-defined minimum bound
504 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
505 }
506
507 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
508 // Here, we might want to also take into account any additional
509 // constraints (i.e., user-defined minimum bound). Currently, we
510 // effectively don't set this bound.
511 return _young_gen_sizer->max_desired_young_length();
512 }
513
514 void G1CollectorPolicy::update_young_list_max_and_target_length() {
515 update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
516 }
517
518 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
519 update_young_list_target_length(rs_lengths);
520 update_max_gc_locker_expansion();
521 }
522
523 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
524 _young_list_target_length = bounded_young_list_target_length(rs_lengths);
525 }
526
527 void G1CollectorPolicy::update_young_list_target_length() {
528 update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
529 }
530
531 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
532 // Calculate the absolute and desired min bounds.
533
534 // This is how many young regions we already have (currently: the survivors).
535 uint base_min_length = recorded_survivor_regions();
536 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
537 // This is the absolute minimum young length. Ensure that we
538 // will at least have one eden region available for allocation.
539 uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
540 // If we shrank the young list target it should not shrink below the current size.
541 desired_min_length = MAX2(desired_min_length, absolute_min_length);
542 // Calculate the absolute and desired max bounds.
543
544 // We will try our best not to "eat" into the reserve.
545 uint absolute_max_length = 0;
546 if (_free_regions_at_end_of_collection > _reserve_regions) {
547 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
548 }
549 uint desired_max_length = calculate_young_list_desired_max_length();
550 if (desired_max_length > absolute_max_length) {
551 desired_max_length = absolute_max_length;
552 }
553
554 uint young_list_target_length = 0;
555 if (adaptive_young_list_length()) {
556 if (collector_state()->gcs_are_young()) {
557 young_list_target_length =
558 calculate_young_list_target_length(rs_lengths,
559 base_min_length,
560 desired_min_length,
561 desired_max_length);
562 } else {
563 // Don't calculate anything and let the code below bound it to
564 // the desired_min_length, i.e., do the next GC as soon as
565 // possible to maximize how many old regions we can add to it.
566 }
567 } else {
568 // The user asked for a fixed young gen so we'll fix the young gen
569 // whether the next GC is young or mixed.
570 young_list_target_length = _young_list_fixed_length;
571 }
572
573 // Make sure we don't go over the desired max length, nor under the
574 // desired min length. In case they clash, desired_min_length wins
575 // which is why that test is second.
576 if (young_list_target_length > desired_max_length) {
577 young_list_target_length = desired_max_length;
578 }
579 if (young_list_target_length < desired_min_length) {
580 young_list_target_length = desired_min_length;
581 }
582
583 assert(young_list_target_length > recorded_survivor_regions(),
584 "we should be able to allocate at least one eden region");
585 assert(young_list_target_length >= absolute_min_length, "post-condition");
586
587 return young_list_target_length;
588 }
589
590 uint
591 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
592 uint base_min_length,
593 uint desired_min_length,
594 uint desired_max_length) const {
595 assert(adaptive_young_list_length(), "pre-condition");
596 assert(collector_state()->gcs_are_young(), "only call this for young GCs");
597
598 // In case some edge-condition makes the desired max length too small...
599 if (desired_max_length <= desired_min_length) {
600 return desired_min_length;
601 }
602
603 // We'll adjust min_young_length and max_young_length not to include
604 // the already allocated young regions (i.e., so they reflect the
605 // min and max eden regions we'll allocate). The base_min_length
606 // will be reflected in the predictions by the
607 // survivor_regions_evac_time prediction.
608 assert(desired_min_length > base_min_length, "invariant");
609 uint min_young_length = desired_min_length - base_min_length;
610 assert(desired_max_length > base_min_length, "invariant");
611 uint max_young_length = desired_max_length - base_min_length;
612
613 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
614 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
615 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
616 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
617 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
618 double base_time_ms =
619 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
620 survivor_regions_evac_time;
621 uint available_free_regions = _free_regions_at_end_of_collection;
622 uint base_free_regions = 0;
623 if (available_free_regions > _reserve_regions) {
624 base_free_regions = available_free_regions - _reserve_regions;
625 }
626
627 // Here, we will make sure that the shortest young length that
628 // makes sense fits within the target pause time.
629
630 if (predict_will_fit(min_young_length, base_time_ms,
631 base_free_regions, target_pause_time_ms)) {
632 // The shortest young length will fit into the target pause time;
633 // we'll now check whether the absolute maximum number of young
634 // regions will fit in the target pause time. If not, we'll do
635 // a binary search between min_young_length and max_young_length.
636 if (predict_will_fit(max_young_length, base_time_ms,
637 base_free_regions, target_pause_time_ms)) {
638 // The maximum young length will fit into the target pause time.
639 // We are done so set min young length to the maximum length (as
640 // the result is assumed to be returned in min_young_length).
641 min_young_length = max_young_length;
642 } else {
643 // The maximum possible number of young regions will not fit within
644 // the target pause time so we'll search for the optimal
645 // length. The loop invariants are:
646 //
647 // min_young_length < max_young_length
648 // min_young_length is known to fit into the target pause time
649 // max_young_length is known not to fit into the target pause time
650 //
651 // Going into the loop we know the above hold as we've just
652 // checked them. Every time around the loop we check whether
653 // the middle value between min_young_length and
654 // max_young_length fits into the target pause time. If it
655 // does, it becomes the new min. If it doesn't, it becomes
656 // the new max. This way we maintain the loop invariants.
657
658 assert(min_young_length < max_young_length, "invariant");
659 uint diff = (max_young_length - min_young_length) / 2;
660 while (diff > 0) {
661 uint young_length = min_young_length + diff;
662 if (predict_will_fit(young_length, base_time_ms,
663 base_free_regions, target_pause_time_ms)) {
664 min_young_length = young_length;
665 } else {
666 max_young_length = young_length;
667 }
668 assert(min_young_length < max_young_length, "invariant");
669 diff = (max_young_length - min_young_length) / 2;
670 }
671 // The result is min_young_length which, according to the
672 // loop invariants, should fit within the target pause time.
673
674 // These are the post-conditions of the binary search above:
675 assert(min_young_length < max_young_length,
676 "otherwise we should have discovered that max_young_length "
677 "fits into the pause target and not done the binary search");
678 assert(predict_will_fit(min_young_length, base_time_ms,
679 base_free_regions, target_pause_time_ms),
680 "min_young_length, the result of the binary search, should "
681 "fit into the pause target");
682 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
683 base_free_regions, target_pause_time_ms),
684 "min_young_length, the result of the binary search, should be "
685 "optimal, so no larger length should fit into the pause target");
686 }
687 } else {
688 // Even the minimum length doesn't fit into the pause time
689 // target, return it as the result nevertheless.
690 }
691 return base_min_length + min_young_length;
692 }
693
694 double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
695 double survivor_regions_evac_time = 0.0;
696 for (HeapRegion * r = _recorded_survivor_head;
697 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
698 r = r->get_next_young_region()) {
699 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
700 }
701 return survivor_regions_evac_time;
702 }
703
704 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
705 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
706
707 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
708 if (rs_lengths > _rs_lengths_prediction) {
709 // add 10% to avoid having to recalculate often
710 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
711 update_rs_lengths_prediction(rs_lengths_prediction);
712
713 update_young_list_max_and_target_length(rs_lengths_prediction);
714 }
715 }
716
717 void G1CollectorPolicy::update_rs_lengths_prediction() {
718 update_rs_lengths_prediction(get_new_prediction(_rs_lengths_seq));
719 }
720
721 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
722 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
723 _rs_lengths_prediction = prediction;
724 }
725 }
726
727 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
728 bool is_tlab,
729 bool* gc_overhead_limit_was_exceeded) {
730 guarantee(false, "Not using this policy feature yet.");
731 return NULL;
732 }
733
734 // This method controls how a collector handles one or more
735 // of its generations being fully allocated.
736 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
737 bool is_tlab) {
738 guarantee(false, "Not using this policy feature yet.");
739 return NULL;
740 }
741
742
743 #ifndef PRODUCT
744 bool G1CollectorPolicy::verify_young_ages() {
745 HeapRegion* head = _g1->young_list()->first_region();
746 return
747 verify_young_ages(head, _short_lived_surv_rate_group);
748 // also call verify_young_ages on any additional surv rate groups
749 }
750
751 bool
752 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
753 SurvRateGroup *surv_rate_group) {
754 guarantee( surv_rate_group != NULL, "pre-condition" );
755
756 const char* name = surv_rate_group->name();
757 bool ret = true;
758 int prev_age = -1;
759
760 for (HeapRegion* curr = head;
761 curr != NULL;
762 curr = curr->get_next_young_region()) {
763 SurvRateGroup* group = curr->surv_rate_group();
764 if (group == NULL && !curr->is_survivor()) {
765 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
766 ret = false;
767 }
768
769 if (surv_rate_group == group) {
770 int age = curr->age_in_surv_rate_group();
771
772 if (age < 0) {
773 gclog_or_tty->print_cr("## %s: encountered negative age", name);
774 ret = false;
775 }
776
777 if (age <= prev_age) {
778 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
779 "(%d, %d)", name, age, prev_age);
780 ret = false;
781 }
782 prev_age = age;
783 }
784 }
785
786 return ret;
787 }
788 #endif // PRODUCT
789
790 void G1CollectorPolicy::record_full_collection_start() {
791 _full_collection_start_sec = os::elapsedTime();
792 record_heap_size_info_at_start(true /* full */);
793 // Release the future to-space so that it is available for compaction into.
794 collector_state()->set_full_collection(true);
795 }
796
797 void G1CollectorPolicy::record_full_collection_end() {
798 // Consider this like a collection pause for the purposes of allocation
799 // since last pause.
800 double end_sec = os::elapsedTime();
801 double full_gc_time_sec = end_sec - _full_collection_start_sec;
802 double full_gc_time_ms = full_gc_time_sec * 1000.0;
803
804 _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
805
806 update_recent_gc_times(end_sec, full_gc_time_ms);
807
808 collector_state()->set_full_collection(false);
809
810 // "Nuke" the heuristics that control the young/mixed GC
811 // transitions and make sure we start with young GCs after the Full GC.
812 collector_state()->set_gcs_are_young(true);
813 collector_state()->set_last_young_gc(false);
814 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
815 collector_state()->set_during_initial_mark_pause(false);
816 collector_state()->set_in_marking_window(false);
817 collector_state()->set_in_marking_window_im(false);
818
819 _short_lived_surv_rate_group->start_adding_regions();
820 // also call this on any additional surv rate groups
821
822 record_survivor_regions(0, NULL, NULL);
823
824 _free_regions_at_end_of_collection = _g1->num_free_regions();
825 // Reset survivors SurvRateGroup.
826 _survivor_surv_rate_group->reset();
827 update_young_list_max_and_target_length();
828 update_rs_lengths_prediction();
829 _collectionSetChooser->clear();
830 }
831
832 void G1CollectorPolicy::record_stop_world_start() {
833 _stop_world_start = os::elapsedTime();
834 }
835
836 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
837 // We only need to do this here as the policy will only be applied
838 // to the GC we're about to start, so there is no point in calculating this
839 // every time we calculate / recalculate the target young length.
840 update_survivors_policy();
841
842 assert(_g1->used() == _g1->recalculate_used(),
843 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
844 _g1->used(), _g1->recalculate_used());
845
846 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
847 _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
848 _stop_world_start = 0.0;
849
850 record_heap_size_info_at_start(false /* full */);
851
852 phase_times()->record_cur_collection_start_sec(start_time_sec);
853 _pending_cards = _g1->pending_card_num();
854
855 _collection_set_bytes_used_before = 0;
856 _bytes_copied_during_gc = 0;
857
858 collector_state()->set_last_gc_was_young(false);
859
860 // do that for any other surv rate groups
861 _short_lived_surv_rate_group->stop_adding_regions();
862 _survivors_age_table.clear();
863
864 assert( verify_young_ages(), "region age verification" );
865 }
866
867 void G1CollectorPolicy::record_concurrent_mark_init_end(double
868 mark_init_elapsed_time_ms) {
869 collector_state()->set_during_marking(true);
870 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
871 collector_state()->set_during_initial_mark_pause(false);
872 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
873 }
874
875 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
876 _mark_remark_start_sec = os::elapsedTime();
877 collector_state()->set_during_marking(false);
878 }
879
880 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
881 double end_time_sec = os::elapsedTime();
882 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
883 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
884 _cur_mark_stop_world_time_ms += elapsed_time_ms;
885 _prev_collection_pause_end_ms += elapsed_time_ms;
886
887 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
888 }
889
890 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
891 _mark_cleanup_start_sec = os::elapsedTime();
892 }
893
894 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
895 collector_state()->set_last_young_gc(true);
896 collector_state()->set_in_marking_window(false);
897 }
898
899 void G1CollectorPolicy::record_concurrent_pause() {
900 if (_stop_world_start > 0.0) {
901 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
902 _trace_young_gen_time_data.record_yield_time(yield_ms);
903 }
904 }
905
906 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
907 return phase_times()->average_time_ms(phase);
908 }
909
910 double G1CollectorPolicy::young_other_time_ms() const {
911 return phase_times()->young_cset_choice_time_ms() +
912 phase_times()->young_free_cset_time_ms();
913 }
914
915 double G1CollectorPolicy::non_young_other_time_ms() const {
916 return phase_times()->non_young_cset_choice_time_ms() +
917 phase_times()->non_young_free_cset_time_ms();
918
919 }
920
921 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
922 return pause_time_ms -
923 average_time_ms(G1GCPhaseTimes::UpdateRS) -
924 average_time_ms(G1GCPhaseTimes::ScanRS) -
925 average_time_ms(G1GCPhaseTimes::ObjCopy) -
926 average_time_ms(G1GCPhaseTimes::Termination);
927 }
928
929 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
930 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
931 }
932
933 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
934 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
935 }
936
937 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
938 if (about_to_start_mixed_phase()) {
939 return false;
940 }
941
942 size_t marking_initiating_used_threshold =
943 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
944 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
945 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
946
947 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
948 if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
949 ergo_verbose5(ErgoConcCycles,
950 "request concurrent cycle initiation",
951 ergo_format_reason("occupancy higher than threshold")
952 ergo_format_byte("occupancy")
953 ergo_format_byte("allocation request")
954 ergo_format_byte_perc("threshold")
955 ergo_format_str("source"),
956 cur_used_bytes,
957 alloc_byte_size,
958 marking_initiating_used_threshold,
959 (double) InitiatingHeapOccupancyPercent,
960 source);
961 return true;
962 } else {
963 ergo_verbose5(ErgoConcCycles,
964 "do not request concurrent cycle initiation",
965 ergo_format_reason("still doing mixed collections")
966 ergo_format_byte("occupancy")
967 ergo_format_byte("allocation request")
968 ergo_format_byte_perc("threshold")
969 ergo_format_str("source"),
970 cur_used_bytes,
971 alloc_byte_size,
972 marking_initiating_used_threshold,
973 (double) InitiatingHeapOccupancyPercent,
974 source);
975 }
976 }
977
978 return false;
979 }
980
981 // Anything below that is considered to be zero
982 #define MIN_TIMER_GRANULARITY 0.0000001
983
984 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
985 double end_time_sec = os::elapsedTime();
986 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
987 "otherwise, the subtraction below does not make sense");
988 size_t rs_size =
989 _cur_collection_pause_used_regions_at_start - cset_region_length();
990 size_t cur_used_bytes = _g1->used();
991 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
992 bool last_pause_included_initial_mark = false;
993 bool update_stats = !_g1->evacuation_failed();
994
995 #ifndef PRODUCT
996 if (G1YoungSurvRateVerbose) {
997 gclog_or_tty->cr();
998 _short_lived_surv_rate_group->print();
999 // do that for any other surv rate groups too
1000 }
1001 #endif // PRODUCT
1002
1003 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1004 if (last_pause_included_initial_mark) {
1005 record_concurrent_mark_init_end(0.0);
1006 } else {
1007 maybe_start_marking();
1008 }
1009
1010 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
1011
1012 if (update_stats) {
1013 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1014 // this is where we update the allocation rate of the application
1015 double app_time_ms =
1016 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1017 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1018 // This usually happens due to the timer not having the required
1019 // granularity. Some Linuxes are the usual culprits.
1020 // We'll just set it to something (arbitrarily) small.
1021 app_time_ms = 1.0;
1022 }
1023 // We maintain the invariant that all objects allocated by mutator
1024 // threads will be allocated out of eden regions. So, we can use
1025 // the number of eden regions allocated since the previous GC to
1026 // calculate the application's allocation rate. The only exception
1027 // to that is humongous objects, which are allocated separately. But
1028 // given that humongous object allocations do not really affect
1029 // either the pause's duration or when the next pause will take
1030 // place, we can safely ignore them here.
1031 uint regions_allocated = eden_cset_region_length();
1032 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1033 _alloc_rate_ms_seq->add(alloc_rate_ms);
1034
1035 double interval_ms =
1036 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1037 update_recent_gc_times(end_time_sec, pause_time_ms);
1038 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
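      // The ratio is the sum of the recent pause times divided by the
      // wall-clock interval that spans them, i.e. the recent fraction of time
      // spent in stop-the-world pauses; it should normally fall in
      // [0.0, 1.0], which is what the clipping below enforces.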
1039 if (recent_avg_pause_time_ratio() < 0.0 ||
1040 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1041 #ifndef PRODUCT
1042 // Dump info to allow post-facto debugging
1043 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
1044 gclog_or_tty->print_cr("-------------------------------------------");
1045 gclog_or_tty->print_cr("Recent GC Times (ms):");
1046 _recent_gc_times_ms->dump();
1047 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1048 _recent_prev_end_times_for_all_gcs_sec->dump();
1049 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1050 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1051 // In debug mode, terminate the JVM if the user wants to debug at this point.
1052 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1053 #endif // !PRODUCT
1054 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1055 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1056 if (_recent_avg_pause_time_ratio < 0.0) {
1057 _recent_avg_pause_time_ratio = 0.0;
1058 } else {
1059 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1060 _recent_avg_pause_time_ratio = 1.0;
1061 }
1062 }
1063 }
1064
1065 bool new_in_marking_window = collector_state()->in_marking_window();
1066 bool new_in_marking_window_im = false;
1067 if (last_pause_included_initial_mark) {
1068 new_in_marking_window = true;
1069 new_in_marking_window_im = true;
1070 }
1071
1072 if (collector_state()->last_young_gc()) {
1073 // This is supposed to be the "last young GC" before we start
1074 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1075 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1076
1077 if (next_gc_should_be_mixed("start mixed GCs",
1078 "do not start mixed GCs")) {
1079 collector_state()->set_gcs_are_young(false);
1080 }
1081
1082 collector_state()->set_last_young_gc(false);
1083 }
1084
1085 if (!collector_state()->last_gc_was_young()) {
1086 // This is a mixed GC. Here we decide whether to continue doing
1087 // mixed GCs or not.
1088
1089 if (!next_gc_should_be_mixed("continue mixed GCs",
1090 "do not continue mixed GCs")) {
1091 collector_state()->set_gcs_are_young(true);
1092
1093 maybe_start_marking();
1094 }
1095 }
1096
1097 _short_lived_surv_rate_group->start_adding_regions();
1098 // Do that for any other surv rate groups
1099
1100 if (update_stats) {
1101 double cost_per_card_ms = 0.0;
1102 double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1103 if (_pending_cards > 0) {
1104 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
1105 _cost_per_card_ms_seq->add(cost_per_card_ms);
1106 }
1107 _cost_scan_hcc_seq->add(cost_scan_hcc);
1108
1109 double cost_per_entry_ms = 0.0;
1110 if (cards_scanned > 10) {
1111 cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
1112 if (collector_state()->last_gc_was_young()) {
1113 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1114 } else {
1115 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1116 }
1117 }
1118
1119 if (_max_rs_lengths > 0) {
1120 double cards_per_entry_ratio =
1121 (double) cards_scanned / (double) _max_rs_lengths;
1122 if (collector_state()->last_gc_was_young()) {
1123 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1124 } else {
1125 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1126 }
1127 }
1128
1129 // This is defensive. For a while _max_rs_lengths could get
1130 // smaller than _recorded_rs_lengths which was causing
1131 // rs_length_diff to get very large and mess up the RSet length
1132 // predictions. The reason was unsafe concurrent updates to the
1133 // _inc_cset_recorded_rs_lengths field which the code below guards
1134 // against (see CR 7118202). This bug has now been fixed (see CR
1135 // 7119027). However, I'm still worried that
1136 // _inc_cset_recorded_rs_lengths might still end up somewhat
1137 // inaccurate. The concurrent refinement thread calculates an
1138 // RSet's length concurrently with other CR threads updating it
1139 // which might cause it to calculate the length incorrectly (if,
1140 // say, it's in mid-coarsening). So I'll leave in the defensive
1141 // conditional below just in case.
1142 size_t rs_length_diff = 0;
1143 if (_max_rs_lengths > _recorded_rs_lengths) {
1144 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1145 }
1146 _rs_length_diff_seq->add((double) rs_length_diff);
1147
1148 size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
1149 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
1150 double cost_per_byte_ms = 0.0;
1151
1152 if (copied_bytes > 0) {
1153 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
1154 if (collector_state()->in_marking_window()) {
1155 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1156 } else {
1157 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1158 }
1159 }
1160
1161 if (young_cset_region_length() > 0) {
1162 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1163 young_cset_region_length());
1164 }
1165
1166 if (old_cset_region_length() > 0) {
1167 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1168 old_cset_region_length());
1169 }
1170
1171 _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1172
1173 _pending_cards_seq->add((double) _pending_cards);
1174 _rs_lengths_seq->add((double) _max_rs_lengths);
1175 }
1176
1177 collector_state()->set_in_marking_window(new_in_marking_window);
1178 collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1179 _free_regions_at_end_of_collection = _g1->num_free_regions();
1180 update_young_list_max_and_target_length();
1181 update_rs_lengths_prediction();
1182
1183 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1184 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1185
1186 double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1187
1188 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1189 ergo_verbose2(ErgoTiming,
1190 "adjust concurrent refinement thresholds",
1191 ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1192 ergo_format_ms("Update RS time goal")
1193 ergo_format_ms("Scan HCC time"),
1194 update_rs_time_goal_ms,
1195 scan_hcc_time_ms);
1196
1197 update_rs_time_goal_ms = 0;
1198 } else {
1199 update_rs_time_goal_ms -= scan_hcc_time_ms;
1200 }
1201 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1202 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1203 update_rs_time_goal_ms);
1204
1205 _collectionSetChooser->verify();
1206 }
1207
1208 #define EXT_SIZE_FORMAT "%.1f%s"
1209 #define EXT_SIZE_PARAMS(bytes) \
1210 byte_size_in_proper_unit((double)(bytes)), \
1211 proper_unit_for_byte_size((bytes))
1212
1213 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1214 YoungList* young_list = _g1->young_list();
1215 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1216 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1217 _heap_capacity_bytes_before_gc = _g1->capacity();
1218 _heap_used_bytes_before_gc = _g1->used();
1219 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1220
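  // The young list target length covers both eden and survivor regions, so
  // the eden capacity reported here is the target young size minus the space
  // currently used by survivors.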
1221 _eden_capacity_bytes_before_gc =
1222 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1223
1224 if (full) {
1225 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1226 }
1227 }
1228
1229 void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const {
1230 size_t bytes_after = _g1->used();
1231 size_t capacity = _g1->capacity();
1232
1233 gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
1234 byte_size_in_proper_unit(bytes_before),
1235 proper_unit_for_byte_size(bytes_before),
1236 byte_size_in_proper_unit(bytes_after),
1237 proper_unit_for_byte_size(bytes_after),
1238 byte_size_in_proper_unit(capacity),
1239 proper_unit_for_byte_size(capacity));
1240 }
1241
1242 void G1CollectorPolicy::print_heap_transition() const {
1243 print_heap_transition(_heap_used_bytes_before_gc);
1244 }
1245
1246 void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
1247 YoungList* young_list = _g1->young_list();
1248
1249 size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1250 size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
1251 size_t heap_used_bytes_after_gc = _g1->used();
1252
1253 size_t heap_capacity_bytes_after_gc = _g1->capacity();
1254 size_t eden_capacity_bytes_after_gc =
1255 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
1256
1257 gclog_or_tty->print(
1258 " [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
1259 "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
1260 "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
1261 EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
1262 EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
1263 EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
1264 EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
1265 EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
1266 EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
1267 EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
1268 EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
1269 EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
1270 EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
1271 EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
1272
1273 if (full) {
1274 MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
1275 }
1276
1277 gclog_or_tty->cr();
1278 }
1279
1280 void G1CollectorPolicy::print_phases(double pause_time_sec) {
1281 phase_times()->print(pause_time_sec);
1282 }
1283
1284 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1285 double update_rs_processed_buffers,
1286 double goal_ms) {
1287 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1288 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1289
1290 if (G1UseAdaptiveConcRefinement) {
1291 const int k_gy = 3, k_gr = 6;
1292 const double inc_k = 1.1, dec_k = 0.9;
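    // The green zone is grown by 10% (inc_k) while Update RS stays under the
    // goal and there is a backlog of completed buffers, and shrunk by 10%
    // (dec_k) when the goal is exceeded; the yellow and red zones are then
    // derived as 3x (k_gy) and 6x (k_gr) the green zone.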
1293
1294 int g = cg1r->green_zone();
1295 if (update_rs_time > goal_ms) {
1296 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
1297 } else {
1298 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1299 g = (int)MAX2(g * inc_k, g + 1.0);
1300 }
1301 }
1302 // Change the refinement threads' params
1303 cg1r->set_green_zone(g);
1304 cg1r->set_yellow_zone(g * k_gy);
1305 cg1r->set_red_zone(g * k_gr);
1306 cg1r->reinitialize_threads();
1307
1308 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
1309 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1310 cg1r->yellow_zone());
1311 // Change the barrier params
1312 dcqs.set_process_completed_threshold(processing_threshold);
1313 dcqs.set_max_completed_queue(cg1r->red_zone());
1314 }
1315
1316 int curr_queue_size = dcqs.completed_buffers_num();
1317 if (curr_queue_size >= cg1r->yellow_zone()) {
1318 dcqs.set_completed_queue_padding(curr_queue_size);
1319 } else {
1320 dcqs.set_completed_queue_padding(0);
1321 }
1322 dcqs.notify_if_necessary();
1323 }
1324
1325 size_t G1CollectorPolicy::predict_rs_length_diff() const {
1326 return (size_t) get_new_prediction(_rs_length_diff_seq);
1327 }
1328
1329 double G1CollectorPolicy::predict_alloc_rate_ms() const {
1330 return get_new_prediction(_alloc_rate_ms_seq);
1331 }
1332
1333 double G1CollectorPolicy::predict_cost_per_card_ms() const {
1334 return get_new_prediction(_cost_per_card_ms_seq);
1335 }
1336
1337 double G1CollectorPolicy::predict_scan_hcc_ms() const {
1338 return get_new_prediction(_cost_scan_hcc_seq);
1339 }
1340
1341 double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
1342 return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
1343 }
1344
1345 double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
1346 return get_new_prediction(_young_cards_per_entry_ratio_seq);
1347 }
1348
1349 double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
1350 if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
1351 return predict_young_cards_per_entry_ratio();
1352 } else {
1353 return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
1354 }
1355 }
1356
1357 size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
1358 return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
1359 }
1360
1361 size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
1362 return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
1363 }
1364
1365 double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
1366 if (collector_state()->gcs_are_young()) {
1367 return card_num * get_new_prediction(_cost_per_entry_ms_seq);
1368 } else {
1369 return predict_mixed_rs_scan_time_ms(card_num);
1370 }
1371 }
1372
1373 double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
1374 if (_mixed_cost_per_entry_ms_seq->num() < 3) {
1375 return card_num * get_new_prediction(_cost_per_entry_ms_seq);
1376 } else {
1377 return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
1378 }
1379 }
1380
1381 double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
1382 if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
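    // Too few samples from pauses during marking: fall back to the normal
    // copy cost, padded by 10%, presumably because copying tends to be more
    // expensive while concurrent marking is running.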
1383 return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
1384 } else {
1385 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
1386 }
1387 }
1388
1389 double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
1390 if (collector_state()->during_concurrent_mark()) {
1391 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
1392 } else {
1393 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
1394 }
1395 }
1396
1397 double G1CollectorPolicy::predict_constant_other_time_ms() const {
1398 return get_new_prediction(_constant_other_time_ms_seq);
1399 }
1400
1401 double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
1402 return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
1403 }
1404
1405 double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
1406 return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
1407 }
1408
1409 double G1CollectorPolicy::predict_remark_time_ms() const {
1410 return get_new_prediction(_concurrent_mark_remark_times_ms);
1411 }
1412
1413 double G1CollectorPolicy::predict_cleanup_time_ms() const {
1414 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
1415 }
1416
1417 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
1418 TruncatedSeq* seq = surv_rate_group->get_seq(age);
1419 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
1420 double pred = get_new_prediction(seq);
1421 if (pred > 1.0) {
1422 pred = 1.0;
1423 }
1424 return pred;
1425 }
1426
1427 double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
1428 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
1429 }
1430
1431 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
1432 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
1433 }
1434
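// Predicted fixed cost of a pause, independent of the regions chosen for
// the collection set: remembered set update for the pending cards,
// remembered set scan for the given number of cards, and the constant
// "other" time.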
1435 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1436 size_t scanned_cards) const {
1437 return
1438 predict_rs_update_time_ms(pending_cards) +
1439 predict_rs_scan_time_ms(scanned_cards) +
1440 predict_constant_other_time_ms();
1441 }
1442
1443 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
1444 size_t rs_length = predict_rs_length_diff();
1445 size_t card_num;
1446 if (collector_state()->gcs_are_young()) {
1447 card_num = predict_young_card_num(rs_length);
1448 } else {
1449 card_num = predict_non_young_card_num(rs_length);
1450 }
1451 return predict_base_elapsed_time_ms(pending_cards, card_num);
1452 }
1453
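// Predicted number of bytes that will be copied out of this region. For
// regions with marking information we use the maximum live bytes known
// from marking; for (young) regions without it we apply the predicted
// survival rate for the region's age to its current occupancy.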
1454 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
1455 size_t bytes_to_copy;
1456 if (hr->is_marked()) {
1457 bytes_to_copy = hr->max_live_bytes();
1458 } else {
1459 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1460 int age = hr->age_in_surv_rate_group();
1461 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1462 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
1463 }
1464 return bytes_to_copy;
1465 }
1466
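// Predicted time to evacuate a single region: remembered set scan plus
// object copy, plus a per-region "other" cost that depends on whether the
// region is young or old.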
1467 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1468 bool for_young_gc) const {
1469 size_t rs_length = hr->rem_set()->occupied();
1470 size_t card_num;
1471
1472 // Predicting the number of cards is based on which type of GC
1473 // we're predicting for.
1474 if (for_young_gc) {
1475 card_num = predict_young_card_num(rs_length);
1476 } else {
1477 card_num = predict_non_young_card_num(rs_length);
1478 }
1479 size_t bytes_to_copy = predict_bytes_to_copy(hr);
1480
1481 double region_elapsed_time_ms =
1482 predict_rs_scan_time_ms(card_num) +
1483 predict_object_copy_time_ms(bytes_to_copy);
1484
1485 // The prediction of the "other" time for this region is based
1486 // upon the region type and NOT the GC type.
1487 if (hr->is_young()) {
1488 region_elapsed_time_ms += predict_young_other_time_ms(1);
1489 } else {
1490 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1491 }
1492 return region_elapsed_time_ms;
1493 }
1494
1495 void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
1496 uint survivor_cset_region_length) {
1497 _eden_cset_region_length = eden_cset_region_length;
1498 _survivor_cset_region_length = survivor_cset_region_length;
1499 _old_cset_region_length = 0;
1500 }
1501
1502 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1503 _recorded_rs_lengths = rs_lengths;
1504 }
1505
1506 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1507 double elapsed_ms) {
1508 _recent_gc_times_ms->add(elapsed_ms);
1509 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1510 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1511 }
1512
1513 size_t G1CollectorPolicy::expansion_amount() const {
1514 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1515 double threshold = _gc_overhead_perc;
1516 if (recent_gc_overhead > threshold) {
1517 // We will double the existing space, or take
1518 // G1ExpandByPercentOfAvailable % of the available expansion
1519 // space, whichever is smaller, bounded below by a minimum
1520 // expansion (unless that's all that's left.)
1521 const size_t min_expand_bytes = 1*M;
1522 size_t reserved_bytes = _g1->max_capacity();
1523 size_t committed_bytes = _g1->capacity();
1524 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1525 size_t expand_bytes;
1526 size_t expand_bytes_via_pct =
1527 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1528 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1529 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1530 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
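// Example (hypothetical sizes), assuming the default
// G1ExpandByPercentOfAvailable of 20: with 4G reserved and 1G committed,
// uncommitted_bytes is 3G and expand_bytes_via_pct is ~614M; the clamps
// above leave the final expansion amount at ~614M.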
1531
1532 ergo_verbose5(ErgoHeapSizing,
1533 "attempt heap expansion",
1534 ergo_format_reason("recent GC overhead higher than "
1535 "threshold after GC")
1536 ergo_format_perc("recent GC overhead")
1537 ergo_format_perc("threshold")
1538 ergo_format_byte("uncommitted")
1539 ergo_format_byte_perc("calculated expansion amount"),
1540 recent_gc_overhead, threshold,
1541 uncommitted_bytes,
1542 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1543
1544 return expand_bytes;
1545 } else {
1546 return 0;
1547 }
1548 }
1549
1550 void G1CollectorPolicy::print_tracing_info() const {
1551 _trace_young_gen_time_data.print();
1552 _trace_old_gen_time_data.print();
1553 }
1554
1555 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1556 #ifndef PRODUCT
1557 _short_lived_surv_rate_group->print_surv_rate_summary();
1558 // add this call for any other surv rate groups
1559 #endif // PRODUCT
1560 }
1561
1562 bool G1CollectorPolicy::is_young_list_full() const {
1563 uint young_list_length = _g1->young_list()->length();
1564 uint young_list_target_length = _young_list_target_length;
1565 return young_list_length >= young_list_target_length;
1566 }
1567
1568 bool G1CollectorPolicy::can_expand_young_list() const {
1569 uint young_list_length = _g1->young_list()->length();
1570 uint young_list_max_length = _young_list_max_length;
1571 return young_list_length < young_list_max_length;
1572 }
1573
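// Recompute the maximum young list length allowed while the GC locker is
// active. Example (hypothetical), assuming the default
// GCLockerEdenExpansionPercent of 5: with a target young length of 100
// regions the young list may be expanded to 105 regions.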
1574 void G1CollectorPolicy::update_max_gc_locker_expansion() {
1575 uint expansion_region_num = 0;
1576 if (GCLockerEdenExpansionPercent > 0) {
1577 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1578 double expansion_region_num_d = perc * (double) _young_list_target_length;
1579 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1580 // less than 1.0) we'll get 1.
1581 expansion_region_num = (uint) ceil(expansion_region_num_d);
1582 } else {
1583 assert(expansion_region_num == 0, "sanity");
1584 }
1585 _young_list_max_length = _young_list_target_length + expansion_region_num;
1586 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1587 }
1588
1589 // Calculates survivor space parameters.
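// Example (hypothetical), assuming the default SurvivorRatio of 8: with a
// target young length of 100 regions, up to ceil(100 / 8) = 13 survivor
// regions are allowed.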
1590 void G1CollectorPolicy::update_survivors_policy() {
1591 double max_survivor_regions_d =
1592 (double) _young_list_target_length / (double) SurvivorRatio;
1593 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1594 // smaller than 1.0) we'll get 1.
1595 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1596
1597 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1598 HeapRegion::GrainWords * _max_survivor_regions, counters());
1599 }
1600
1601 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1602 // We actually check whether we are marking here and not if we are in a
1603 // reclamation phase. This means that we will schedule a concurrent mark
1604 // even while we are still in the process of reclaiming memory.
1605 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1606 if (!during_cycle) {
1607 ergo_verbose1(ErgoConcCycles,
1608 "request concurrent cycle initiation",
1609 ergo_format_reason("requested by GC cause")
1610 ergo_format_str("GC cause"),
1611 GCCause::to_string(gc_cause));
1612 collector_state()->set_initiate_conc_mark_if_possible(true);
1613 return true;
1614 } else {
1615 ergo_verbose1(ErgoConcCycles,
1616 "do not request concurrent cycle initiation",
1617 ergo_format_reason("concurrent cycle already in progress")
1618 ergo_format_str("GC cause"),
1619 GCCause::to_string(gc_cause));
1620 return false;
1621 }
1622 }
1623
1624 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
1625 // We are about to decide on whether this pause will be an
1626 // initial-mark pause.
1627
1628 // First, collector_state()->during_initial_mark_pause() should not be already set. We
1629 // will set it here if we have to. However, it should be cleared by
1630 // the end of the pause (it's only set for the duration of an
1631 // initial-mark pause).
1632 assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1633
1634 if (collector_state()->initiate_conc_mark_if_possible()) {
1635 // We had noticed on a previous pause that the heap occupancy has
1636 // gone over the initiating threshold and we should start a
1637 // concurrent marking cycle. So we might initiate one.
1638
1639 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
1640 // Initiate a new initial mark only if there is no marking or reclamation going
1641 // on.
1642
1643 collector_state()->set_during_initial_mark_pause(true);
1644 // And we can now clear initiate_conc_mark_if_possible() as
1645 // we've already acted on it.
1646 collector_state()->set_initiate_conc_mark_if_possible(false);
1647
1648 ergo_verbose0(ErgoConcCycles,
1649 "initiate concurrent cycle",
1650 ergo_format_reason("concurrent cycle initiation requested"));
1651 } else {
1652 // The concurrent marking thread is still finishing up the
1653 // previous cycle. If we start one right now the two cycles
1654 // overlap. In particular, the concurrent marking thread might
1655 // be in the process of clearing the next marking bitmap (which
1656 // we will use for the next cycle if we start one). Starting a
1657 // cycle now will be bad given that parts of the marking
1658 // information might get cleared by the marking thread. And we
1659 // cannot wait for the marking thread to finish the cycle as it
1660 // periodically yields while clearing the next marking bitmap
1661 // and, if it's in a yield point, it's waiting for us to
1662 // finish. So, at this point we will not start a cycle and we'll
1663 // let the concurrent marking thread complete the last one.
1664 ergo_verbose0(ErgoConcCycles,
1665 "do not initiate concurrent cycle",
1666 ergo_format_reason("concurrent cycle already in progress"));
1667 }
1668 }
1669 }
1670
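// Closure used to rebuild the collection set chooser after marking: every
// marked region that is a suitable candidate, and is not the current old
// GC alloc region, is added to the chooser in parallel.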
1671 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1672 G1CollectedHeap* _g1h;
1673 CSetChooserParUpdater _cset_updater;
1674
1675 public:
1676 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1677 uint chunk_size) :
1678 _g1h(G1CollectedHeap::heap()),
1679 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1680
1681 bool doHeapRegion(HeapRegion* r) {
1682 // Do we have any marking information for this region?
1683 if (r->is_marked()) {
1684 // We will skip any region that's currently used as an old GC
1685 // alloc region (we should not consider those for collection
1686 // before we fill them up).
1687 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1688 _cset_updater.add_region(r);
1689 }
1690 }
1691 return false;
1692 }
1693 };
1694
1695 class ParKnownGarbageTask: public AbstractGangTask {
1696 CollectionSetChooser* _hrSorted;
1697 uint _chunk_size;
1698 G1CollectedHeap* _g1;
1699 HeapRegionClaimer _hrclaimer;
1700
1701 public:
1702 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1703 AbstractGangTask("ParKnownGarbageTask"),
1704 _hrSorted(hrSorted), _chunk_size(chunk_size),
1705 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1706
1707 void work(uint worker_id) {
1708 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1709 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1710 }
1711 };
1712
1713 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
1714 assert(n_workers > 0, "Active gc workers should be greater than 0");
1715 const uint overpartition_factor = 4;
1716 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1717 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1718 }
1719
1720 void
1721 G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1722 _collectionSetChooser->clear();
1723
1724 WorkGang* workers = _g1->workers();
1725 uint n_workers = workers->active_workers();
1726
1727 uint n_regions = _g1->num_regions();
1728 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1729 _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1730 ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
1731 workers->run_task(&par_known_garbage_task);
1732
1733 _collectionSetChooser->sort_regions();
1734
1735 double end_sec = os::elapsedTime();
1736 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1737 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1738 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1739 _prev_collection_pause_end_ms += elapsed_time_ms;
1740 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
1741 }
1742
1743 // Add the heap region at the head of the non-incremental collection set
1744 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1745 assert(_inc_cset_build_state == Active, "Precondition");
1746 assert(hr->is_old(), "the region should be old");
1747
1748 assert(!hr->in_collection_set(), "should not already be in the CSet");
1749 _g1->register_old_region_with_cset(hr);
1750 hr->set_next_in_collection_set(_collection_set);
1751 _collection_set = hr;
1752 _collection_set_bytes_used_before += hr->used();
1753 size_t rs_length = hr->rem_set()->occupied();
1754 _recorded_rs_lengths += rs_length;
1755 _old_cset_region_length += 1;
1756 }
1757
1758 // Initialize the per-collection-set information
1759 void G1CollectorPolicy::start_incremental_cset_building() {
1760 assert(_inc_cset_build_state == Inactive, "Precondition");
1761
1762 _inc_cset_head = NULL;
1763 _inc_cset_tail = NULL;
1764 _inc_cset_bytes_used_before = 0;
1765
1766 _inc_cset_max_finger = 0;
1767 _inc_cset_recorded_rs_lengths = 0;
1768 _inc_cset_recorded_rs_lengths_diffs = 0;
1769 _inc_cset_predicted_elapsed_time_ms = 0.0;
1770 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1771 _inc_cset_build_state = Active;
1772 }
1773
1774 void G1CollectorPolicy::finalize_incremental_cset_building() {
1775 assert(_inc_cset_build_state == Active, "Precondition");
1776 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1777
1778 // The two "main" fields, _inc_cset_recorded_rs_lengths and
1779 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1780 // that adds a new region to the CSet. Further updates by the
1781 // concurrent refinement thread that samples the young RSet lengths
1782 // are accumulated in the *_diffs fields. Here we add the diffs to
1783 // the "main" fields.
1784
1785 if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1786 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
1787 } else {
1788 // This is defensive. In theory the diff should always be positive
1789 // as RSets can only grow between GCs. However, given that we
1790 // sample their size concurrently with other threads updating them
1791 // it's possible that we might get the wrong size back, which
1792 // could make the calculations somewhat inaccurate.
1793 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
1794 if (_inc_cset_recorded_rs_lengths >= diffs) {
1795 _inc_cset_recorded_rs_lengths -= diffs;
1796 } else {
1797 _inc_cset_recorded_rs_lengths = 0;
1798 }
1799 }
1800 _inc_cset_predicted_elapsed_time_ms +=
1801 _inc_cset_predicted_elapsed_time_ms_diffs;
1802
1803 _inc_cset_recorded_rs_lengths_diffs = 0;
1804 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1805 }
1806
1807 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1808 // This routine is used when:
1809 // * adding survivor regions to the incremental cset at the end of an
1810 // evacuation pause,
1811 // * adding the current allocation region to the incremental cset
1812 // when it is retired, and
1813 // * updating existing policy information for a region in the
1814 // incremental cset via young list RSet sampling.
1815 // Therefore this routine may be called at a safepoint by the
1816 // VM thread, or in-between safepoints by mutator threads (when
1817 // retiring the current allocation region) or a concurrent
1818 // refine thread (RSet sampling).
1819
1820 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1821 size_t used_bytes = hr->used();
1822 _inc_cset_recorded_rs_lengths += rs_length;
1823 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1824 _inc_cset_bytes_used_before += used_bytes;
1825
1826 // Cache the values we have added to the aggregated information
1827 // in the heap region in case we have to remove this region from
1828 // the incremental collection set, or it is updated by the
1829 // RSet sampling code.
1830 hr->set_recorded_rs_length(rs_length);
1831 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1832 }
1833
1834 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1835 size_t new_rs_length) {
1836 // Update the CSet information that is dependent on the new RS length
1837 assert(hr->is_young(), "Precondition");
1838 assert(!SafepointSynchronize::is_at_safepoint(),
1839 "should not be at a safepoint");
1840
1841 // We could have updated _inc_cset_recorded_rs_lengths and
1842 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1843 // that atomically, as this code is executed by a concurrent
1844 // refinement thread, potentially concurrently with a mutator thread
1845 // allocating a new region and also updating the same fields. To
1846 // avoid the atomic operations we accumulate these updates on two
1847 // separate fields (*_diffs) and we'll just add them to the "main"
1848 // fields at the start of a GC.
1849
1850 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1851 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1852 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1853
1854 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1855 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1856 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1857 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1858
1859 hr->set_recorded_rs_length(new_rs_length);
1860 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1861 }
1862
1863 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1864 assert(hr->is_young(), "invariant");
1865 assert(hr->young_index_in_cset() > -1, "should have already been set");
1866 assert(_inc_cset_build_state == Active, "Precondition");
1867
1868 // We need to clear and set the cached recorded collection set
1869 // information in the heap region here (before the region gets added
1870 // to the collection set). An individual heap region's cached values
1871 // are calculated, aggregated with the policy collection set info,
1872 // and cached in the heap region here (initially) and (subsequently)
1873 // by the Young List sampling code.
1874
1875 size_t rs_length = hr->rem_set()->occupied();
1876 add_to_incremental_cset_info(hr, rs_length);
1877
1878 HeapWord* hr_end = hr->end();
1879 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1880
1881 assert(!hr->in_collection_set(), "invariant");
1882 _g1->register_young_region_with_cset(hr);
1883 assert(hr->next_in_collection_set() == NULL, "invariant");
1884 }
1885
1886 // Add the region at the RHS of the incremental cset
1887 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1888 // We should only ever be appending survivors at the end of a pause
1889 assert(hr->is_survivor(), "Logic");
1890
1891 // Do the 'common' stuff
1892 add_region_to_incremental_cset_common(hr);
1893
1894 // Now add the region at the right hand side
1895 if (_inc_cset_tail == NULL) {
1896 assert(_inc_cset_head == NULL, "invariant");
1897 _inc_cset_head = hr;
1898 } else {
1899 _inc_cset_tail->set_next_in_collection_set(hr);
1900 }
1901 _inc_cset_tail = hr;
1902 }
1903
1904 // Add the region to the LHS of the incremental cset
1905 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
1906 // Survivors should be added to the RHS at the end of a pause
1907 assert(hr->is_eden(), "Logic");
1908
1909 // Do the 'common' stuff
1910 add_region_to_incremental_cset_common(hr);
1911
1912 // Add the region at the left hand side
1913 hr->set_next_in_collection_set(_inc_cset_head);
1914 if (_inc_cset_head == NULL) {
1915 assert(_inc_cset_tail == NULL, "Invariant");
1916 _inc_cset_tail = hr;
1917 }
1918 _inc_cset_head = hr;
1919 }
1920
1921 #ifndef PRODUCT
1922 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
1923 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
1924
1925 st->print_cr("\nCollection_set:");
1926 HeapRegion* csr = list_head;
1927 while (csr != NULL) {
1928 HeapRegion* next = csr->next_in_collection_set();
1929 assert(csr->in_collection_set(), "bad CS");
1930 st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
1931 HR_FORMAT_PARAMS(csr),
1932 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
1933 csr->age_in_surv_rate_group_cond());
1934 csr = next;
1935 }
1936 }
1937 #endif // !PRODUCT
1938
1939 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1940 // Returns the given amount of reclaimable bytes (that represents
1941 // the amount of reclaimable space still to be collected) as a
1942 // percentage of the current heap capacity.
1943 size_t capacity_bytes = _g1->capacity();
1944 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1945 }
1946
1947 void G1CollectorPolicy::maybe_start_marking() {
1948 if (need_to_start_conc_mark("end of GC")) {
1949 // Note: this might have already been set, if during the last
1950 // pause we decided to start a cycle but at the beginning of
1951 // this pause we decided to postpone it. That's OK.
1952 collector_state()->set_initiate_conc_mark_if_possible(true);
1953 }
1954 }
1955
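// Decide whether the next GC should be mixed: the CSet chooser must still
// hold candidate old regions, and the space reclaimable from them must
// exceed G1HeapWastePercent of the current heap capacity.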
1956 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1957 const char* false_action_str) const {
1958 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1959 if (cset_chooser->is_empty()) {
1960 ergo_verbose0(ErgoMixedGCs,
1961 false_action_str,
1962 ergo_format_reason("candidate old regions not available"));
1963 return false;
1964 }
1965
1966 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1967 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1968 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1969 double threshold = (double) G1HeapWastePercent;
1970 if (reclaimable_perc <= threshold) {
1971 ergo_verbose4(ErgoMixedGCs,
1972 false_action_str,
1973 ergo_format_reason("reclaimable percentage not over threshold")
1974 ergo_format_region("candidate old regions")
1975 ergo_format_byte_perc("reclaimable")
1976 ergo_format_perc("threshold"),
1977 cset_chooser->remaining_regions(),
1978 reclaimable_bytes,
1979 reclaimable_perc, threshold);
1980 return false;
1981 }
1982
1983 ergo_verbose4(ErgoMixedGCs,
1984 true_action_str,
1985 ergo_format_reason("candidate old regions available")
1986 ergo_format_region("candidate old regions")
1987 ergo_format_byte_perc("reclaimable")
1988 ergo_format_perc("threshold"),
1989 cset_chooser->remaining_regions(),
1990 reclaimable_bytes,
1991 reclaimable_perc, threshold);
1992 return true;
1993 }
1994
1995 uint G1CollectorPolicy::calc_min_old_cset_length() const {
1996 // The min old CSet region bound is based on the maximum desired
1997 // number of mixed GCs after a cycle. I.e., even if some old regions
1998 // look expensive, we should add them to the CSet anyway to make
1999 // sure we go through the available old regions in no more than the
2000 // maximum desired number of mixed GCs.
2001 //
2002 // The calculation is based on the number of marked regions we added
2003 // to the CSet chooser in the first place, not how many remain, so
2004 // that the result is the same during all mixed GCs that follow a cycle.
2005
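// Example (hypothetical), assuming the default G1MixedGCCountTarget of 8:
// with 250 candidate regions this gives ceil(250 / 8) = 32 regions as the
// minimum number of old regions added to each mixed GC.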
2006 const size_t region_num = (size_t) _collectionSetChooser->length();
2007 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
2008 size_t result = region_num / gc_num;
2009 // emulate ceiling
2010 if (result * gc_num < region_num) {
2011 result += 1;
2012 }
2013 return (uint) result;
2014 }
2015
2016 uint G1CollectorPolicy::calc_max_old_cset_length() const {
2017 // The max old CSet region bound is based on the threshold expressed
2018 // as a percentage of the heap size. I.e., it should bound the
2019 // number of old regions added to the CSet irrespective of how many
2020 // of them are available.
2021
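// Example (hypothetical), assuming the default
// G1OldCSetRegionThresholdPercent of 10: with 2048 regions at most
// ceil(2048 * 10 / 100) = 205 old regions are added to any single CSet.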
2022 const G1CollectedHeap* g1h = G1CollectedHeap::heap();
2023 const size_t region_num = g1h->num_regions();
2024 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
2025 size_t result = region_num * perc / 100;
2026 // emulate ceiling
2027 if (100 * result < region_num * perc) {
2028 result += 1;
2029 }
2030 return (uint) result;
2031 }
2032
2033
2034 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2035 double young_start_time_sec = os::elapsedTime();
2036
2037 YoungList* young_list = _g1->young_list();
2038 finalize_incremental_cset_building();
2039
2040 guarantee(target_pause_time_ms > 0.0,
2041 "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2042 guarantee(_collection_set == NULL, "Precondition");
2043
2044 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2045 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2046
2047 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
2048 "start choosing CSet",
2049 ergo_format_size("_pending_cards")
2050 ergo_format_ms("predicted base time")
2051 ergo_format_ms("remaining time")
2052 ergo_format_ms("target pause time"),
2053 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2054
2055 collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2056
2057 if (collector_state()->last_gc_was_young()) {
2058 _trace_young_gen_time_data.increment_young_collection_count();
2059 } else {
2060 _trace_young_gen_time_data.increment_mixed_collection_count();
2061 }
2062
2063 // The young list is laid out so that the survivor regions from the
2064 // previous pause are appended to the RHS of the young list, i.e.
2065 // [Newly Young Regions ++ Survivors from last pause].
2066
2067 uint survivor_region_length = young_list->survivor_length();
2068 uint eden_region_length = young_list->eden_length();
2069 init_cset_region_lengths(eden_region_length, survivor_region_length);
2070
2071 HeapRegion* hr = young_list->first_survivor_region();
2072 while (hr != NULL) {
2073 assert(hr->is_survivor(), "badly formed young list");
2074 // There is a convention that all the young regions in the CSet
2075 // are tagged as "eden", so we do this for the survivors here. We
2076 // use the special set_eden_pre_gc() as it doesn't check that the
2077 // region is free (which is not the case here).
2078 hr->set_eden_pre_gc();
2079 hr = hr->get_next_young_region();
2080 }
2081
2082 // Clear the fields that point to the survivor list - they are all young now.
2083 young_list->clear_survivors();
2084
2085 _collection_set = _inc_cset_head;
2086 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2087 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
2088
2089 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
2090 "add young regions to CSet",
2091 ergo_format_region("eden")
2092 ergo_format_region("survivors")
2093 ergo_format_ms("predicted young region time")
2094 ergo_format_ms("target pause time"),
2095 eden_region_length, survivor_region_length,
2096 _inc_cset_predicted_elapsed_time_ms,
2097 target_pause_time_ms);
2098
2099 // The number of recorded young regions is the incremental
2100 // collection set's current size
2101 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2102
2103 double young_end_time_sec = os::elapsedTime();
2104 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
2105
2106 return time_remaining_ms;
2107 }
2108
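// Select the old regions for a mixed GC. Candidates are taken from the
// sorted CSet chooser until we reach the maximum old CSet length, the
// remaining reclaimable space drops to G1HeapWastePercent or below, or
// (with adaptive young list sizing) the remaining pause-time budget is
// exhausted, subject to always adding at least the minimum old CSet length.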
2109 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
2110 double non_young_start_time_sec = os::elapsedTime();
2111 double predicted_old_time_ms = 0.0;
2112
2113
2114 if (!collector_state()->gcs_are_young()) {
2115 CollectionSetChooser* cset_chooser = _collectionSetChooser;
2116 cset_chooser->verify();
2117 const uint min_old_cset_length = calc_min_old_cset_length();
2118 const uint max_old_cset_length = calc_max_old_cset_length();
2119
2120 uint expensive_region_num = 0;
2121 bool check_time_remaining = adaptive_young_list_length();
2122
2123 HeapRegion* hr = cset_chooser->peek();
2124 while (hr != NULL) {
2125 if (old_cset_region_length() >= max_old_cset_length) {
2126 // Added maximum number of old regions to the CSet.
2127 ergo_verbose2(ErgoCSetConstruction,
2128 "finish adding old regions to CSet",
2129 ergo_format_reason("old CSet region num reached max")
2130 ergo_format_region("old")
2131 ergo_format_region("max"),
2132 old_cset_region_length(), max_old_cset_length);
2133 break;
2134 }
2135
2136
2137 // Stop adding regions if the remaining reclaimable space is
2138 // not above G1HeapWastePercent.
2139 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2140 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2141 double threshold = (double) G1HeapWastePercent;
2142 if (reclaimable_perc <= threshold) {
2143 // We've added enough old regions that the amount of uncollected
2144 // reclaimable space is at or below the waste threshold. Stop
2145 // adding old regions to the CSet.
2146 ergo_verbose5(ErgoCSetConstruction,
2147 "finish adding old regions to CSet",
2148 ergo_format_reason("reclaimable percentage not over threshold")
2149 ergo_format_region("old")
2150 ergo_format_region("max")
2151 ergo_format_byte_perc("reclaimable")
2152 ergo_format_perc("threshold"),
2153 old_cset_region_length(),
2154 max_old_cset_length,
2155 reclaimable_bytes,
2156 reclaimable_perc, threshold);
2157 break;
2158 }
2159
2160 double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2161 if (check_time_remaining) {
2162 if (predicted_time_ms > time_remaining_ms) {
2163 // Too expensive for the current CSet.
2164
2165 if (old_cset_region_length() >= min_old_cset_length) {
2166 // We have added the minimum number of old regions to the CSet,
2167 // we are done with this CSet.
2168 ergo_verbose4(ErgoCSetConstruction,
2169 "finish adding old regions to CSet",
2170 ergo_format_reason("predicted time is too high")
2171 ergo_format_ms("predicted time")
2172 ergo_format_ms("remaining time")
2173 ergo_format_region("old")
2174 ergo_format_region("min"),
2175 predicted_time_ms, time_remaining_ms,
2176 old_cset_region_length(), min_old_cset_length);
2177 break;
2178 }
2179
2180 // We'll add it anyway given that we haven't reached the
2181 // minimum number of old regions.
2182 expensive_region_num += 1;
2183 }
2184 } else {
2185 if (old_cset_region_length() >= min_old_cset_length) {
2186 // In the non-auto-tuning case, we'll finish adding regions
2187 // to the CSet if we reach the minimum.
2188 ergo_verbose2(ErgoCSetConstruction,
2189 "finish adding old regions to CSet",
2190 ergo_format_reason("old CSet region num reached min")
2191 ergo_format_region("old")
2192 ergo_format_region("min"),
2193 old_cset_region_length(), min_old_cset_length);
2194 break;
2195 }
2196 }
2197
2198 // We will add this region to the CSet.
2199 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2200 predicted_old_time_ms += predicted_time_ms;
2201 cset_chooser->pop(); // already have region via peek()
2202 _g1->old_set_remove(hr);
2203 add_old_region_to_cset(hr);
2204
2205 hr = cset_chooser->peek();
2206 }
2207 if (hr == NULL) {
2208 ergo_verbose0(ErgoCSetConstruction,
2209 "finish adding old regions to CSet",
2210 ergo_format_reason("candidate old regions not available"));
2211 }
2212
2213 if (expensive_region_num > 0) {
2214 // We print the information once here at the end, predicated on
2215 // whether we added any apparently expensive regions or not, to
2216 // avoid generating output per region.
2217 ergo_verbose4(ErgoCSetConstruction,
2218 "added expensive regions to CSet",
2219 ergo_format_reason("old CSet region num not reached min")
2220 ergo_format_region("old")
2221 ergo_format_region("expensive")
2222 ergo_format_region("min")
2223 ergo_format_ms("remaining time"),
2224 old_cset_region_length(),
2225 expensive_region_num,
2226 min_old_cset_length,
2227 time_remaining_ms);
2228 }
2229
2230 cset_chooser->verify();
2231 }
2232
2233 stop_incremental_cset_building();
2234
2235 ergo_verbose3(ErgoCSetConstruction,
2236 "finish choosing CSet",
2237 ergo_format_region("old")
2238 ergo_format_ms("predicted old region time")
2239 ergo_format_ms("time remaining"),
2240 old_cset_region_length(),
2241 predicted_old_time_ms, time_remaining_ms);
2242
2243 double non_young_end_time_sec = os::elapsedTime();
2244 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2245 }
2246
2247 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2248 if (TraceYoungGenTime) {
2249 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2250 }
2251 }
2252
2253 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2254 if (TraceYoungGenTime) {
2255 _all_yield_times_ms.add(yield_time_ms);
2256 }
2257 }
2258
2259 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2260 if (TraceYoungGenTime) {
2261 _total.add(pause_time_ms);
2262 _other.add(pause_time_ms - phase_times->accounted_time_ms());
2263 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2264 _parallel.add(phase_times->cur_collection_par_time_ms());
2265 _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
2266 _satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
2267 _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
2268 _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
2269 _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
2270 _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));
2271
2272 double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
2273 phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
2274 phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
2275 phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
2276 phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
2277 phase_times->average_time_ms(G1GCPhaseTimes::Termination);
2278
2279 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2280 _parallel_other.add(parallel_other_time);
2281 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2282 }
2283 }
2284
2285 void TraceYoungGenTimeData::increment_young_collection_count() {
2286 if (TraceYoungGenTime) {
2287 ++_young_pause_num;
2288 }
2289 }
2290
2291 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2292 if (TraceYoungGenTime) {
2293 ++_mixed_pause_num;
2294 }
2295 }
2296
2297 void TraceYoungGenTimeData::print_summary(const char* str,
2298 const NumberSeq* seq) const {
2299 double sum = seq->sum();
2300 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2301 str, sum / 1000.0, seq->avg());
2302 }
2303
2304 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2305 const NumberSeq* seq) const {
2306 print_summary(str, seq);
2307 gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2308 "(num", seq->num(), seq->sd(), seq->maximum());
2309 }
2310
2311 void TraceYoungGenTimeData::print() const {
2312 if (!TraceYoungGenTime) {
2313 return;
2314 }
2315
2316 gclog_or_tty->print_cr("ALL PAUSES");
2317 print_summary_sd(" Total", &_total);
2318 gclog_or_tty->cr();
2319 gclog_or_tty->cr();
2320 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
2321 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
2322 gclog_or_tty->cr();
2323
2324 gclog_or_tty->print_cr("EVACUATION PAUSES");
2325
2326 if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2327 gclog_or_tty->print_cr("none");
2328 } else {
2329 print_summary_sd(" Evacuation Pauses", &_total);
2330 print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
2331 print_summary(" Parallel Time", &_parallel);
2332 print_summary(" Ext Root Scanning", &_ext_root_scan);
2333 print_summary(" SATB Filtering", &_satb_filtering);
2334 print_summary(" Update RS", &_update_rs);
2335 print_summary(" Scan RS", &_scan_rs);
2336 print_summary(" Object Copy", &_obj_copy);
2337 print_summary(" Termination", &_termination);
2338 print_summary(" Parallel Other", &_parallel_other);
2339 print_summary(" Clear CT", &_clear_ct);
2340 print_summary(" Other", &_other);
2341 }
2342 gclog_or_tty->cr();
2343
2344 gclog_or_tty->print_cr("MISC");
2345 print_summary_sd(" Stop World", &_all_stop_world_times_ms);
2346 print_summary_sd(" Yields", &_all_yield_times_ms);
2347 }
2348
2349 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2350 if (TraceOldGenTime) {
2351 _all_full_gc_times.add(full_gc_time_ms);
2352 }
2353 }
2354
2355 void TraceOldGenTimeData::print() const {
2356 if (!TraceOldGenTime) {
2357 return;
2358 }
2359
2360 if (_all_full_gc_times.num() > 0) {
2361 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2362 _all_full_gc_times.num(),
2363 _all_full_gc_times.sum() / 1000.0);
2364 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2365 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
2366 _all_full_gc_times.sd(),
2367 _all_full_gc_times.maximum());
2368 }
2369 }
--- EOF ---