G1: Use SoftMaxHeapSize to guide GC heuristics
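The hunks below thread a new finish_of_mixed_gc flag through G1Policy::record_collection_pause_end(): it is cleared during pause-end processing and set when the policy decides not to continue mixed GCs, presumably so that the SoftMaxHeapSize-driven heuristics elsewhere in this change can react once a mixed phase has just completed. The flag's accessors live in G1CollectorState, which is not part of this file; a minimal sketch, assuming the additions follow the pattern of the existing boolean state flags such as _in_young_only_phase, is:

    // Assumed shape of the G1CollectorState additions (gc/g1/g1CollectorState.hpp is not
    // shown in this hunk): a plain boolean flag with getter/setter, mirroring the
    // existing collector-state flags.
    class G1CollectorState {
      bool _finish_of_mixed_gc = false;  // true right after the last mixed GC of a phase
    public:
      void set_finish_of_mixed_gc(bool v) { _finish_of_mixed_gc = v; }
      bool finish_of_mixed_gc() const     { return _finish_of_mixed_gc; }
    };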
--- old/src/hotspot/share/gc/g1/g1Policy.cpp
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc/g1/g1Analytics.hpp"
27 27 #include "gc/g1/g1Arguments.hpp"
28 28 #include "gc/g1/g1CollectedHeap.inline.hpp"
29 29 #include "gc/g1/g1CollectionSet.hpp"
30 30 #include "gc/g1/g1CollectionSetCandidates.hpp"
31 31 #include "gc/g1/g1ConcurrentMark.hpp"
32 32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
33 33 #include "gc/g1/g1ConcurrentRefine.hpp"
34 34 #include "gc/g1/g1CollectionSetChooser.hpp"
35 35 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
36 36 #include "gc/g1/g1HotCardCache.hpp"
37 37 #include "gc/g1/g1IHOPControl.hpp"
38 38 #include "gc/g1/g1GCPhaseTimes.hpp"
39 39 #include "gc/g1/g1Policy.hpp"
40 40 #include "gc/g1/g1SurvivorRegions.hpp"
41 41 #include "gc/g1/g1YoungGenSizer.hpp"
42 42 #include "gc/g1/heapRegion.inline.hpp"
43 43 #include "gc/g1/heapRegionRemSet.hpp"
44 44 #include "gc/shared/gcPolicyCounters.hpp"
45 45 #include "logging/logStream.hpp"
46 46 #include "runtime/arguments.hpp"
47 47 #include "runtime/java.hpp"
48 48 #include "runtime/mutexLocker.hpp"
49 49 #include "utilities/debug.hpp"
50 50 #include "utilities/growableArray.hpp"
51 51 #include "utilities/pair.hpp"
52 52
53 53 G1Policy::G1Policy(STWGCTimer* gc_timer) :
54 54 _predictor(G1ConfidencePercent / 100.0),
55 55 _analytics(new G1Analytics(&_predictor)),
56 56 _remset_tracker(),
57 57 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
58 58 _ihop_control(create_ihop_control(&_predictor)),
59 59 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
60 60 _full_collection_start_sec(0.0),
61 61 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
62 62 _young_list_target_length(0),
63 63 _young_list_fixed_length(0),
64 64 _young_list_max_length(0),
65 65 _eden_surv_rate_group(new G1SurvRateGroup()),
66 66 _survivor_surv_rate_group(new G1SurvRateGroup()),
67 67 _reserve_factor((double) G1ReservePercent / 100.0),
68 68 _reserve_regions(0),
69 69 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
70 70 _free_regions_at_end_of_collection(0),
71 71 _rs_length(0),
72 72 _rs_length_prediction(0),
73 73 _pending_cards_at_gc_start(0),
74 74 _pending_cards_at_prev_gc_end(0),
75 75 _total_mutator_refined_cards(0),
76 76 _total_concurrent_refined_cards(0),
77 77 _total_concurrent_refinement_time(),
78 78 _bytes_allocated_in_old_since_last_gc(0),
79 79 _initial_mark_to_mixed(),
80 80 _collection_set(NULL),
81 81 _g1h(NULL),
82 82 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
83 83 _mark_remark_start_sec(0),
84 84 _mark_cleanup_start_sec(0),
85 85 _tenuring_threshold(MaxTenuringThreshold),
86 86 _max_survivor_regions(0),
87 87 _survivors_age_table(true)
88 88 {
89 89 }
90 90
91 91 G1Policy::~G1Policy() {
92 92 delete _ihop_control;
93 93 delete _young_gen_sizer;
94 94 }
95 95
96 96 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
97 97 if (G1Arguments::is_heterogeneous_heap()) {
98 98 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
99 99 } else {
100 100 return new G1Policy(gc_timer_stw);
101 101 }
102 102 }
103 103
104 104 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
105 105
106 106 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
107 107 _g1h = g1h;
108 108 _collection_set = collection_set;
109 109
110 110 assert(Heap_lock->owned_by_self(), "Locking discipline.");
111 111
112 112 if (!use_adaptive_young_list_length()) {
113 113 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
114 114 }
115 115 _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
116 116
117 117 _free_regions_at_end_of_collection = _g1h->num_free_regions();
118 118
119 119 update_young_list_max_and_target_length();
120 120 // We may immediately start allocating regions and placing them on the
121 121 // collection set list. Initialize the per-collection set info
122 122 _collection_set->start_incremental_building();
123 123 }
124 124
125 125 void G1Policy::note_gc_start() {
126 126 phase_times()->note_gc_start();
127 127 }
128 128
129 129 class G1YoungLengthPredictor {
130 130 const double _base_time_ms;
131 131 const double _base_free_regions;
132 132 const double _target_pause_time_ms;
133 133 const G1Policy* const _policy;
134 134
135 135 public:
136 136 G1YoungLengthPredictor(double base_time_ms,
137 137 double base_free_regions,
138 138 double target_pause_time_ms,
139 139 const G1Policy* policy) :
140 140 _base_time_ms(base_time_ms),
141 141 _base_free_regions(base_free_regions),
142 142 _target_pause_time_ms(target_pause_time_ms),
143 143 _policy(policy) {}
144 144
145 145 bool will_fit(uint young_length) const {
146 146 if (young_length >= _base_free_regions) {
147 147 // end condition 1: not enough space for the young regions
148 148 return false;
149 149 }
150 150
151 151 size_t bytes_to_copy = 0;
152 152 const double copy_time_ms = _policy->predict_eden_copy_time_ms(young_length, &bytes_to_copy);
153 153 const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
154 154 const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
155 155 if (pause_time_ms > _target_pause_time_ms) {
156 156 // end condition 2: prediction is over the target pause time
157 157 return false;
158 158 }
159 159
160 160 const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;
161 161
162 162 // When copying, we will likely need more bytes free than is live in the region.
163 163 // Add some safety margin to factor in the confidence of our guess, and the
164 164 // natural expected waste.
165 165 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
166 166 // of the calculation: the lower the confidence, the more headroom.
167 167 // (100 + TargetPLABWastePct) represents the increase in expected bytes during
168 168 // copying due to anticipated waste in the PLABs.
169 169 const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
170 170 const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
171 171
172 172 if (expected_bytes_to_copy > free_bytes) {
173 173 // end condition 3: out-of-space
174 174 return false;
175 175 }
176 176
177 177 // success!
178 178 return true;
179 179 }
180 180 };
181 181
182 182 void G1Policy::record_new_heap_size(uint new_number_of_regions) {
183 183 // re-calculate the necessary reserve
184 184 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
185 185 // We use ceiling so that if reserve_regions_d is > 0.0 (but
186 186 // smaller than 1.0) we'll get 1.
187 187 _reserve_regions = (uint) ceil(reserve_regions_d);
188 188
189 189 _young_gen_sizer->heap_size_changed(new_number_of_regions);
190 190
191 191 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
192 192 }
193 193
194 194 uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
195 195 uint desired_min_length = 0;
196 196 if (use_adaptive_young_list_length()) {
197 197 if (_analytics->num_alloc_rate_ms() > 3) {
198 198 double now_sec = os::elapsedTime();
199 199 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
200 200 double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
201 201 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
202 202 } else {
203 203 // otherwise we don't have enough info to make the prediction
204 204 }
205 205 }
206 206 desired_min_length += base_min_length;
207 207 // make sure we don't go below any user-defined minimum bound
208 208 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
209 209 }
210 210
211 211 uint G1Policy::calculate_young_list_desired_max_length() const {
212 212 // Here, we might want to also take into account any additional
213 213 // constraints (i.e., user-defined minimum bound). Currently, we
214 214 // effectively don't set this bound.
215 215 return _young_gen_sizer->max_desired_young_length();
216 216 }
217 217
218 218 uint G1Policy::update_young_list_max_and_target_length() {
219 219 return update_young_list_max_and_target_length(_analytics->predict_rs_length());
220 220 }
221 221
222 222 uint G1Policy::update_young_list_max_and_target_length(size_t rs_length) {
223 223 uint unbounded_target_length = update_young_list_target_length(rs_length);
224 224 update_max_gc_locker_expansion();
225 225 return unbounded_target_length;
226 226 }
227 227
228 228 uint G1Policy::update_young_list_target_length(size_t rs_length) {
229 229 YoungTargetLengths young_lengths = young_list_target_lengths(rs_length);
230 230 _young_list_target_length = young_lengths.first;
231 231
232 232 return young_lengths.second;
233 233 }
234 234
235 235 G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_length) const {
236 236 YoungTargetLengths result;
237 237
238 238 // Calculate the absolute and desired min bounds first.
239 239
240 240 // This is how many young regions we already have (currently: the survivors).
241 241 const uint base_min_length = _g1h->survivor_regions_count();
242 242 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
243 243 // This is the absolute minimum young length. Ensure that we
244 244 // will at least have one eden region available for allocation.
245 245 uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1);
246 246 // If we shrank the young list target it should not shrink below the current size.
247 247 desired_min_length = MAX2(desired_min_length, absolute_min_length);
248 248 // Calculate the absolute and desired max bounds.
249 249
250 250 uint desired_max_length = calculate_young_list_desired_max_length();
251 251
252 252 uint young_list_target_length = 0;
253 253 if (use_adaptive_young_list_length()) {
254 254 if (collector_state()->in_young_only_phase()) {
255 255 young_list_target_length =
256 256 calculate_young_list_target_length(rs_length,
257 257 base_min_length,
258 258 desired_min_length,
259 259 desired_max_length);
260 260 } else {
261 261 // Don't calculate anything and let the code below bound it to
262 262 // the desired_min_length, i.e., do the next GC as soon as
263 263 // possible to maximize how many old regions we can add to it.
264 264 }
265 265 } else {
266 266 // The user asked for a fixed young gen so we'll fix the young gen
267 267 // whether the next GC is young or mixed.
268 268 young_list_target_length = _young_list_fixed_length;
269 269 }
270 270
271 271 result.second = young_list_target_length;
272 272
273 273 // We will try our best not to "eat" into the reserve.
274 274 uint absolute_max_length = 0;
275 275 if (_free_regions_at_end_of_collection > _reserve_regions) {
276 276 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
277 277 }
278 278 if (desired_max_length > absolute_max_length) {
279 279 desired_max_length = absolute_max_length;
280 280 }
281 281
282 282 // Make sure we don't go over the desired max length, nor under the
283 283 // desired min length. In case they clash, desired_min_length wins
284 284 // which is why that test is second.
285 285 if (young_list_target_length > desired_max_length) {
286 286 young_list_target_length = desired_max_length;
287 287 }
288 288 if (young_list_target_length < desired_min_length) {
289 289 young_list_target_length = desired_min_length;
290 290 }
291 291
292 292 assert(young_list_target_length > base_min_length,
293 293 "we should be able to allocate at least one eden region");
294 294 assert(young_list_target_length >= absolute_min_length, "post-condition");
295 295
296 296 result.first = young_list_target_length;
297 297 return result;
298 298 }
299 299
300 300 uint G1Policy::calculate_young_list_target_length(size_t rs_length,
301 301 uint base_min_length,
302 302 uint desired_min_length,
303 303 uint desired_max_length) const {
304 304 assert(use_adaptive_young_list_length(), "pre-condition");
305 305 assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
306 306
307 307 // In case some edge-condition makes the desired max length too small...
308 308 if (desired_max_length <= desired_min_length) {
309 309 return desired_min_length;
310 310 }
311 311
312 312 // We'll adjust min_young_length and max_young_length not to include
313 313 // the already allocated young regions (i.e., so they reflect the
314 314 // min and max eden regions we'll allocate). The base_min_length
315 315 // will be reflected in the predictions by the
316 316 // survivor_regions_evac_time prediction.
317 317 assert(desired_min_length > base_min_length, "invariant");
318 318 uint min_young_length = desired_min_length - base_min_length;
319 319 assert(desired_max_length > base_min_length, "invariant");
320 320 uint max_young_length = desired_max_length - base_min_length;
321 321
322 322 const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
323 323 const size_t pending_cards = _analytics->predict_pending_cards();
324 324 const double base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);
325 325 const uint available_free_regions = _free_regions_at_end_of_collection;
326 326 const uint base_free_regions =
327 327 available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
328 328
329 329 // Here, we will make sure that the shortest young length that
330 330 // makes sense fits within the target pause time.
331 331
332 332 G1YoungLengthPredictor p(base_time_ms,
333 333 base_free_regions,
334 334 target_pause_time_ms,
335 335 this);
336 336 if (p.will_fit(min_young_length)) {
337 337 // The shortest young length will fit into the target pause time;
338 338 // we'll now check whether the absolute maximum number of young
339 339 // regions will fit in the target pause time. If not, we'll do
340 340 // a binary search between min_young_length and max_young_length.
341 341 if (p.will_fit(max_young_length)) {
342 342 // The maximum young length will fit into the target pause time.
343 343 // We are done so set min young length to the maximum length (as
344 344 // the result is assumed to be returned in min_young_length).
345 345 min_young_length = max_young_length;
346 346 } else {
347 347 // The maximum possible number of young regions will not fit within
348 348 // the target pause time so we'll search for the optimal
349 349 // length. The loop invariants are:
350 350 //
351 351 // min_young_length < max_young_length
352 352 // min_young_length is known to fit into the target pause time
353 353 // max_young_length is known not to fit into the target pause time
354 354 //
355 355 // Going into the loop we know the above hold as we've just
356 356 // checked them. Every time around the loop we check whether
357 357 // the middle value between min_young_length and
358 358 // max_young_length fits into the target pause time. If it
359 359 // does, it becomes the new min. If it doesn't, it becomes
360 360 // the new max. This way we maintain the loop invariants.
361 361
362 362 assert(min_young_length < max_young_length, "invariant");
363 363 uint diff = (max_young_length - min_young_length) / 2;
364 364 while (diff > 0) {
365 365 uint young_length = min_young_length + diff;
366 366 if (p.will_fit(young_length)) {
367 367 min_young_length = young_length;
368 368 } else {
369 369 max_young_length = young_length;
370 370 }
371 371 assert(min_young_length < max_young_length, "invariant");
372 372 diff = (max_young_length - min_young_length) / 2;
373 373 }
374 374 // The result is min_young_length which, according to the
375 375 // loop invariants, should fit within the target pause time.
376 376
377 377 // These are the post-conditions of the binary search above:
378 378 assert(min_young_length < max_young_length,
379 379 "otherwise we should have discovered that max_young_length "
380 380 "fits into the pause target and not done the binary search");
381 381 assert(p.will_fit(min_young_length),
382 382 "min_young_length, the result of the binary search, should "
383 383 "fit into the pause target");
384 384 assert(!p.will_fit(min_young_length + 1),
385 385 "min_young_length, the result of the binary search, should be "
386 386 "optimal, so no larger length should fit into the pause target");
387 387 }
388 388 } else {
389 389 // Even the minimum length doesn't fit into the pause time
390 390 // target, return it as the result nevertheless.
391 391 }
392 392 return base_min_length + min_young_length;
393 393 }
394 394
395 395 double G1Policy::predict_survivor_regions_evac_time() const {
396 396 double survivor_regions_evac_time = 0.0;
397 397 const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
398 398 for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
399 399 it != survivor_regions->end();
400 400 ++it) {
401 401 survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
402 402 }
403 403 return survivor_regions_evac_time;
404 404 }
405 405
406 406 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
407 407 guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
408 408
409 409 if (rs_length > _rs_length_prediction) {
410 410 // add 10% to avoid having to recalculate often
411 411 size_t rs_length_prediction = rs_length * 1100 / 1000;
412 412 update_rs_length_prediction(rs_length_prediction);
413 413
414 414 update_young_list_max_and_target_length(rs_length_prediction);
415 415 }
416 416 }
417 417
418 418 void G1Policy::update_rs_length_prediction() {
419 419 update_rs_length_prediction(_analytics->predict_rs_length());
420 420 }
421 421
422 422 void G1Policy::update_rs_length_prediction(size_t prediction) {
423 423 if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
424 424 _rs_length_prediction = prediction;
425 425 }
426 426 }
427 427
428 428 void G1Policy::record_full_collection_start() {
429 429 _full_collection_start_sec = os::elapsedTime();
430 430 // Release the future to-space so that it is available for compaction into.
431 431 collector_state()->set_in_young_only_phase(false);
432 432 collector_state()->set_in_full_gc(true);
433 433 _collection_set->clear_candidates();
434 434 record_concurrent_refinement_data(true /* is_full_collection */);
435 435 }
436 436
437 437 void G1Policy::record_full_collection_end() {
438 438 // Consider this like a collection pause for the purposes of allocation
439 439 // since last pause.
440 440 double end_sec = os::elapsedTime();
441 441 double full_gc_time_sec = end_sec - _full_collection_start_sec;
442 442 double full_gc_time_ms = full_gc_time_sec * 1000.0;
443 443
444 444 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
445 445
446 446 collector_state()->set_in_full_gc(false);
447 447
448 448 // "Nuke" the heuristics that control the young/mixed GC
449 449 // transitions and make sure we start with young GCs after the Full GC.
450 450 collector_state()->set_in_young_only_phase(true);
451 451 collector_state()->set_in_young_gc_before_mixed(false);
452 452 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
453 453 collector_state()->set_in_initial_mark_gc(false);
454 454 collector_state()->set_mark_or_rebuild_in_progress(false);
455 455 collector_state()->set_clearing_next_bitmap(false);
456 456
457 457 _eden_surv_rate_group->start_adding_regions();
458 458 // also call this on any additional surv rate groups
459 459
460 460 _free_regions_at_end_of_collection = _g1h->num_free_regions();
461 461 _survivor_surv_rate_group->reset();
462 462 update_young_list_max_and_target_length();
463 463 update_rs_length_prediction();
464 464 _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
465 465
466 466 _bytes_allocated_in_old_since_last_gc = 0;
467 467
468 468 record_pause(FullGC, _full_collection_start_sec, end_sec);
469 469 }
470 470
471 471 void G1Policy::record_concurrent_refinement_data(bool is_full_collection) {
472 472 _pending_cards_at_gc_start = _g1h->pending_card_num();
473 473
474 474 // Record info about concurrent refinement thread processing.
475 475 G1ConcurrentRefine* cr = _g1h->concurrent_refine();
476 476 G1ConcurrentRefine::RefinementStats cr_stats = cr->total_refinement_stats();
477 477
478 478 Tickspan cr_time = cr_stats._time - _total_concurrent_refinement_time;
479 479 _total_concurrent_refinement_time = cr_stats._time;
480 480
481 481 size_t cr_cards = cr_stats._cards - _total_concurrent_refined_cards;
482 482 _total_concurrent_refined_cards = cr_stats._cards;
483 483
484 484 // Don't update rate if full collection. We could be in an implicit full
485 485 // collection after a non-full collection failure, in which case there
486 486 // wasn't any mutator/cr-thread activity since last recording. And if
487 487 // we're in an explicit full collection, the time since the last GC can
488 488 // be arbitrarily short, so not a very good sample. Similarly, don't
489 489 // update the rate if the current sample is empty or time is zero.
490 490 if (!is_full_collection && (cr_cards > 0) && (cr_time > Tickspan())) {
491 491 double rate = cr_cards / (cr_time.seconds() * MILLIUNITS);
492 492 _analytics->report_concurrent_refine_rate_ms(rate);
493 493 }
494 494
495 495 // Record info about mutator thread processing.
496 496 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
497 497 size_t mut_total_cards = dcqs.total_mutator_refined_cards();
498 498 size_t mut_cards = mut_total_cards - _total_mutator_refined_cards;
499 499 _total_mutator_refined_cards = mut_total_cards;
500 500
501 501 // Record mutator's card logging rate.
502 502 // Don't update if full collection; see above.
503 503 if (!is_full_collection) {
504 504 size_t total_cards = _pending_cards_at_gc_start + cr_cards + mut_cards;
505 505 assert(_pending_cards_at_prev_gc_end <= total_cards,
506 506 "untracked cards: last pending: " SIZE_FORMAT
507 507 ", pending: " SIZE_FORMAT ", conc refine: " SIZE_FORMAT
508 508 ", mut refine:" SIZE_FORMAT,
509 509 _pending_cards_at_prev_gc_end, _pending_cards_at_gc_start,
510 510 cr_cards, mut_cards);
511 511 size_t logged_cards = total_cards - _pending_cards_at_prev_gc_end;
512 512 double logging_start_time = _analytics->prev_collection_pause_end_ms();
513 513 double logging_end_time = Ticks::now().seconds() * MILLIUNITS;
514 514 double logging_time = logging_end_time - logging_start_time;
515 515 // Unlike above for conc-refine rate, here we should not require a
516 516 // non-empty sample, since an application could go some time with only
517 517 // young-gen or filtered out writes. But we'll ignore unusually short
518 518 // sample periods, as they may just pollute the predictions.
519 519 if (logging_time > 1.0) { // Require > 1ms sample time.
520 520 _analytics->report_logged_cards_rate_ms(logged_cards / logging_time);
521 521 }
522 522 }
523 523 }
524 524
525 525 void G1Policy::record_collection_pause_start(double start_time_sec) {
526 526 // We only need to do this here as the policy will only be applied
527 527 // to the GC we're about to start, so there is no point in calculating this
528 528 // every time we calculate / recalculate the target young length.
529 529 update_survivors_policy();
530 530
531 531 assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
532 532 "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
533 533 max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
534 534 assert_used_and_recalculate_used_equal(_g1h);
535 535
536 536 phase_times()->record_cur_collection_start_sec(start_time_sec);
537 537
538 538 record_concurrent_refinement_data(false /* is_full_collection */);
539 539
540 540 _collection_set->reset_bytes_used_before();
541 541
542 542 // do that for any other surv rate groups
543 543 _eden_surv_rate_group->stop_adding_regions();
544 544 _survivors_age_table.clear();
545 545
546 546 assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
547 547 }
548 548
549 549 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
550 550 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
551 551 collector_state()->set_in_initial_mark_gc(false);
552 552 }
553 553
554 554 void G1Policy::record_concurrent_mark_remark_start() {
555 555 _mark_remark_start_sec = os::elapsedTime();
556 556 }
557 557
558 558 void G1Policy::record_concurrent_mark_remark_end() {
559 559 double end_time_sec = os::elapsedTime();
560 560 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
561 561 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
562 562 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
563 563
564 564 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
565 565 }
566 566
567 567 void G1Policy::record_concurrent_mark_cleanup_start() {
568 568 _mark_cleanup_start_sec = os::elapsedTime();
569 569 }
570 570
571 571 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
572 572 return phase_times()->average_time_ms(phase);
573 573 }
574 574
575 575 double G1Policy::young_other_time_ms() const {
576 576 return phase_times()->young_cset_choice_time_ms() +
577 577 phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
578 578 }
579 579
580 580 double G1Policy::non_young_other_time_ms() const {
581 581 return phase_times()->non_young_cset_choice_time_ms() +
582 582 phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
583 583 }
584 584
585 585 double G1Policy::other_time_ms(double pause_time_ms) const {
586 586 return pause_time_ms - phase_times()->cur_collection_par_time_ms();
587 587 }
588 588
589 589 double G1Policy::constant_other_time_ms(double pause_time_ms) const {
590 590 return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms() - phase_times()->total_rebuild_freelist_time_ms();
591 591 }
592 592
593 593 bool G1Policy::about_to_start_mixed_phase() const {
594 594 return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
595 595 }
596 596
597 597 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
598 598 if (about_to_start_mixed_phase()) {
599 599 return false;
600 600 }
601 601
602 602 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
603 603
604 604 size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
605 605 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
606 606 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
607 607
608 608 bool result = false;
609 609 if (marking_request_bytes > marking_initiating_used_threshold) {
610 610 result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
611 611 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
612 612 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
613 613 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
614 614 }
615 615
616 616 return result;
617 617 }
618 618
619 619 double G1Policy::logged_cards_processing_time() const {
620 620 double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
621 621 size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
622 622 size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
623 623 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
624 624 // This may happen if there are duplicate cards in different log buffers.
625 625 if (logged_dirty_cards > scan_heap_roots_cards) {
626 626 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
627 627 }
628 628 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
629 629 }
630 630
631 631 // Anything below that is considered to be zero
632 632 #define MIN_TIMER_GRANULARITY 0.0000001
633 633
634 634 void G1Policy::record_collection_pause_end(double pause_time_ms) {
635 635 G1GCPhaseTimes* p = phase_times();
636 636
637 637 double end_time_sec = os::elapsedTime();
638 638
639 639 bool this_pause_included_initial_mark = false;
640 640 bool this_pause_was_young_only = collector_state()->in_young_only_phase();
641 641
642 642 bool update_stats = !_g1h->evacuation_failed();
643 643
644 644 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
645 645
646 646 _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
647 647
648 648 this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
649 649 if (this_pause_included_initial_mark) {
650 650 record_concurrent_mark_init_end(0.0);
651 651 } else {
652 652 maybe_start_marking();
653 653 }
654 654
655 655 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
656 656 if (app_time_ms < MIN_TIMER_GRANULARITY) {
657 657 // This usually happens due to the timer not having the required
658 658 // granularity. Some Linuxes are the usual culprits.
659 659 // We'll just set it to something (arbitrarily) small.
660 660 app_time_ms = 1.0;
661 661 }
662 662
663 663 if (update_stats) {
664 664 // We maintain the invariant that all objects allocated by mutator
665 665 // threads will be allocated out of eden regions. So, we can use
666 666 // the eden region number allocated since the previous GC to
667 667 // calculate the application's allocate rate. The only exception
668 668 // to that is humongous objects that are allocated separately. But
669 669 // given that humongous object allocations do not really affect
670 670 // either the pause's duration nor when the next pause will take
671 671 // place we can safely ignore them here.
672 672 uint regions_allocated = _collection_set->eden_region_length();
673 673 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
674 674 _analytics->report_alloc_rate_ms(alloc_rate_ms);
675 675
676 676 double interval_ms =
677 677 (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
678 678 _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
679 679 _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
680 680 }
681 681
682 + if (collector_state()->finish_of_mixed_gc()) {
683 + collector_state()->set_finish_of_mixed_gc(false);
684 + }
682 685 if (collector_state()->in_young_gc_before_mixed()) {
683 686 assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
684 687 // This has been the young GC before we start doing mixed GCs. We already
685 688 // decided to start mixed GCs much earlier, so there is nothing to do except
686 689 // advancing the state.
687 690 collector_state()->set_in_young_only_phase(false);
688 691 collector_state()->set_in_young_gc_before_mixed(false);
689 692 } else if (!this_pause_was_young_only) {
690 693 // This is a mixed GC. Here we decide whether to continue doing more
691 694 // mixed GCs or not.
692 695 if (!next_gc_should_be_mixed("continue mixed GCs",
693 696 "do not continue mixed GCs")) {
694 697 collector_state()->set_in_young_only_phase(true);
698 + collector_state()->set_finish_of_mixed_gc(true);
695 699
696 700 clear_collection_set_candidates();
697 701 maybe_start_marking();
698 702 }
699 703 }
700 704
701 705 _eden_surv_rate_group->start_adding_regions();
702 706
703 707 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
704 708 if (update_stats) {
705 709 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
706 710 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
707 711 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
708 712 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
709 713 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
710 714 total_log_buffer_cards;
711 715
712 716 // The threshold for the number of cards in a given sampling which we consider
713 717 // large enough so that the impact from setup and other costs is negligible.
714 718 size_t const CardsNumSamplingThreshold = 10;
715 719
716 720 if (total_cards_merged > CardsNumSamplingThreshold) {
717 721 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
718 722 average_time_ms(G1GCPhaseTimes::MergeRS) +
719 723 average_time_ms(G1GCPhaseTimes::MergeHCC) +
720 724 average_time_ms(G1GCPhaseTimes::MergeLB) +
721 725 average_time_ms(G1GCPhaseTimes::OptMergeRS);
722 726 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
723 727 }
724 728
725 729 // Update prediction for card scan
726 730 size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
727 731 p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
728 732
729 733 if (total_cards_scanned > CardsNumSamplingThreshold) {
730 734 double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
731 735 average_time_ms(G1GCPhaseTimes::OptScanHR);
732 736
733 737 _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only);
734 738 }
735 739
736 740 // Update prediction for the ratio between cards from the remembered
737 741 // sets and actually scanned cards from the remembered sets.
738 742 // Cards from the remembered sets are all cards not duplicated by cards from
739 743 // the logs.
740 744 // Due to duplicates in the log buffers, the number of actually scanned cards
741 745 // can be smaller than the cards in the log buffers.
742 746 const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0;
743 747 double merge_to_scan_ratio = 0.0;
744 748 if (total_cards_scanned > 0) {
745 749 merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
746 750 }
747 751 _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only);
748 752
749 753 const size_t recorded_rs_length = _collection_set->recorded_rs_length();
750 754 const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
751 755 _analytics->report_rs_length_diff(rs_length_diff);
752 756
753 757 // Update prediction for copy cost per byte
754 758 size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);
755 759
756 760 if (copied_bytes > 0) {
757 761 double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
758 762 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
759 763 }
760 764
761 765 if (_collection_set->young_region_length() > 0) {
762 766 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
763 767 _collection_set->young_region_length());
764 768 }
765 769
766 770 if (_collection_set->old_region_length() > 0) {
767 771 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
768 772 _collection_set->old_region_length());
769 773 }
770 774
771 775 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
772 776
773 777 // Do not update RS lengths and the number of pending cards with information from mixed gc:
774 778 // these are wildly different from those during young only gc and mess up young gen sizing right
775 779 // after the mixed gc phase.
776 780 // During mixed gc we do not use them for young gen sizing.
777 781 if (this_pause_was_young_only) {
778 782 _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
779 783 _analytics->report_rs_length((double) _rs_length);
780 784 }
781 785 }
782 786
783 787 assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
784 788 "If the last pause has been an initial mark, we should not have been in the marking window");
785 789 if (this_pause_included_initial_mark) {
786 790 collector_state()->set_mark_or_rebuild_in_progress(true);
787 791 }
788 792
789 793 _free_regions_at_end_of_collection = _g1h->num_free_regions();
790 794
791 795 update_rs_length_prediction();
792 796
793 797 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
794 798 // that in this case we are not running in a "normal" operating mode.
795 799 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
796 800 // IHOP control wants to know the expected young gen length if it were not
797 801 // restrained by the heap reserve. Using the actual length would make the
798 802 // prediction too small and limit the young gen every time we get to the
799 803 // predicted target occupancy.
800 804 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
801 805
802 806 update_ihop_prediction(app_time_ms / 1000.0,
803 807 _bytes_allocated_in_old_since_last_gc,
804 808 last_unrestrained_young_length * HeapRegion::GrainBytes,
805 809 this_pause_was_young_only);
806 810 _bytes_allocated_in_old_since_last_gc = 0;
807 811
808 812 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
809 813 } else {
810 814 // Any garbage collection triggered as periodic collection resets the time-to-mixed
811 815 // measurement. Periodic collection typically means that the application is "inactive", i.e.
812 816 // the marking threads may have received an uncharacteristic amount of cpu time
813 817 // for completing the marking, i.e. are faster than expected.
814 818 // This skews the predicted marking length towards smaller values which might cause
815 819 // the mark start being too late.
816 820 _initial_mark_to_mixed.reset();
817 821 }
818 822
819 823 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
820 824 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
821 825
822 826 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
823 827 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
824 828 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
825 829 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
826 830
827 831 scan_logged_cards_time_goal_ms = 0;
828 832 } else {
829 833 scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
830 834 }
831 835
832 836 _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
833 837 double const logged_cards_time = logged_cards_processing_time();
834 838
835 839 log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
836 840 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
837 841
838 842 _g1h->concurrent_refine()->adjust(logged_cards_time,
839 843 phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
840 844 scan_logged_cards_time_goal_ms);
841 845 }
842 846
843 847 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
844 848 if (G1UseAdaptiveIHOP) {
845 849 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
846 850 predictor,
847 851 G1ReservePercent,
848 852 G1HeapWastePercent);
849 853 } else {
850 854 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
851 855 }
852 856 }
853 857
854 858 void G1Policy::update_ihop_prediction(double mutator_time_s,
855 859 size_t mutator_alloc_bytes,
856 860 size_t young_gen_size,
857 861 bool this_gc_was_young_only) {
858 862 // Always try to update IHOP prediction. Even evacuation failures give information
859 863 // about e.g. whether to start IHOP earlier next time.
860 864
861 865 // Avoid using really small application times that might create samples with
862 866 // very high or very low values. They may be caused by e.g. back-to-back gcs.
863 867 double const min_valid_time = 1e-6;
864 868
865 869 bool report = false;
866 870
867 871 double marking_to_mixed_time = -1.0;
868 872 if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
869 873 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
870 874 assert(marking_to_mixed_time > 0.0,
871 875 "Initial mark to mixed time must be larger than zero but is %.3f",
872 876 marking_to_mixed_time);
873 877 if (marking_to_mixed_time > min_valid_time) {
874 878 _ihop_control->update_marking_length(marking_to_mixed_time);
875 879 report = true;
876 880 }
877 881 }
878 882
879 883 // As an approximation for the young gc promotion rates during marking we use
880 884 // all of them. In many applications there are only a few if any young gcs during
881 885 // marking, which makes any prediction useless. This increases the accuracy of the
882 886 // prediction.
883 887 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
884 888 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
885 889 report = true;
886 890 }
887 891
888 892 if (report) {
889 893 report_ihop_statistics();
890 894 }
891 895 }
892 896
893 897 void G1Policy::report_ihop_statistics() {
894 898 _ihop_control->print();
895 899 }
896 900
897 901 void G1Policy::print_phases() {
898 902 phase_times()->print();
899 903 }
900 904
901 905 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
902 906 size_t rs_length) const {
903 907 size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
904 908 return
905 909 _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
906 910 _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
907 911 _analytics->predict_constant_other_time_ms() +
908 912 predict_survivor_regions_evac_time();
909 913 }
910 914
911 915 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
912 916 size_t rs_length = _analytics->predict_rs_length();
913 917 return predict_base_elapsed_time_ms(pending_cards, rs_length);
914 918 }
915 919
916 920 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
917 921 size_t bytes_to_copy;
918 922 if (!hr->is_young()) {
919 923 bytes_to_copy = hr->max_live_bytes();
920 924 } else {
921 925 bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));
922 926 }
923 927 return bytes_to_copy;
924 928 }
925 929
926 930 double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
927 931 if (count == 0) {
928 932 return 0.0;
929 933 }
930 934 size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes;
931 935 if (bytes_to_copy != NULL) {
932 936 *bytes_to_copy = expected_bytes;
933 937 }
934 938 return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->mark_or_rebuild_in_progress());
935 939 }
936 940
937 941 double G1Policy::predict_region_copy_time_ms(HeapRegion* hr) const {
938 942 size_t const bytes_to_copy = predict_bytes_to_copy(hr);
939 943 return _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
940 944 }
941 945
942 946 double G1Policy::predict_region_non_copy_time_ms(HeapRegion* hr,
943 947 bool for_young_gc) const {
944 948 size_t rs_length = hr->rem_set()->occupied();
945 949 size_t scan_card_num = _analytics->predict_scan_card_num(rs_length, for_young_gc);
946 950
947 951 double region_elapsed_time_ms =
948 952 _analytics->predict_card_merge_time_ms(rs_length, collector_state()->in_young_only_phase()) +
949 953 _analytics->predict_card_scan_time_ms(scan_card_num, collector_state()->in_young_only_phase());
950 954
951 955 // The prediction of the "other" time for this region is based
952 956 // upon the region type and NOT the GC type.
953 957 if (hr->is_young()) {
954 958 region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
955 959 } else {
956 960 region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
957 961 }
958 962 return region_elapsed_time_ms;
959 963 }
960 964
961 965 double G1Policy::predict_region_total_time_ms(HeapRegion* hr, bool for_young_gc) const {
962 966 return predict_region_non_copy_time_ms(hr, for_young_gc) + predict_region_copy_time_ms(hr);
963 967 }
964 968
965 969 bool G1Policy::should_allocate_mutator_region() const {
966 970 uint young_list_length = _g1h->young_regions_count();
967 971 uint young_list_target_length = _young_list_target_length;
968 972 return young_list_length < young_list_target_length;
969 973 }
970 974
971 975 bool G1Policy::can_expand_young_list() const {
972 976 uint young_list_length = _g1h->young_regions_count();
973 977 uint young_list_max_length = _young_list_max_length;
974 978 return young_list_length < young_list_max_length;
975 979 }
976 980
977 981 bool G1Policy::use_adaptive_young_list_length() const {
978 982 return _young_gen_sizer->use_adaptive_young_list_length();
979 983 }
980 984
981 985 size_t G1Policy::desired_survivor_size(uint max_regions) const {
982 986 size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
983 987 return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
984 988 }
985 989
986 990 void G1Policy::print_age_table() {
987 991 _survivors_age_table.print_age_table(_tenuring_threshold);
988 992 }
989 993
990 994 void G1Policy::update_max_gc_locker_expansion() {
991 995 uint expansion_region_num = 0;
992 996 if (GCLockerEdenExpansionPercent > 0) {
993 997 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
994 998 double expansion_region_num_d = perc * (double) _young_list_target_length;
995 999 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
996 1000 // less than 1.0) we'll get 1.
997 1001 expansion_region_num = (uint) ceil(expansion_region_num_d);
998 1002 } else {
999 1003 assert(expansion_region_num == 0, "sanity");
1000 1004 }
1001 1005 _young_list_max_length = _young_list_target_length + expansion_region_num;
1002 1006 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1003 1007 }
1004 1008
1005 1009 // Calculates survivor space parameters.
1006 1010 void G1Policy::update_survivors_policy() {
1007 1011 double max_survivor_regions_d =
1008 1012 (double) _young_list_target_length / (double) SurvivorRatio;
1009 1013
1010 1014 // Calculate desired survivor size based on desired max survivor regions (unconstrained
1011 1015 // by remaining heap). Otherwise we may cause undesired promotions as we are
1012 1016 // already getting close to the end of the heap, impacting performance even more.
1013 1017 uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1014 1018 size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1015 1019
1016 1020 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1017 1021 if (UsePerfData) {
1018 1022 _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1019 1023 _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1020 1024 }
1021 1025 // The real maximum survivor size is bounded by the number of regions that can
1022 1026 // be allocated into.
1023 1027 _max_survivor_regions = MIN2(desired_max_survivor_regions,
1024 1028 _g1h->num_free_or_available_regions());
1025 1029 }
1026 1030
1027 1031 bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1028 1032 // We actually check whether we are marking here and not if we are in a
1029 1033 // reclamation phase. This means that we will schedule a concurrent mark
1030 1034 // even while we are still in the process of reclaiming memory.
1031 1035 bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
1032 1036 if (!during_cycle) {
1033 1037 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
1034 1038 collector_state()->set_initiate_conc_mark_if_possible(true);
1035 1039 return true;
1036 1040 } else {
1037 1041 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
1038 1042 return false;
1039 1043 }
1040 1044 }
1041 1045
1042 1046 void G1Policy::initiate_conc_mark() {
1043 1047 collector_state()->set_in_initial_mark_gc(true);
1044 1048 collector_state()->set_initiate_conc_mark_if_possible(false);
1045 1049 }
1046 1050
1047 1051 void G1Policy::decide_on_conc_mark_initiation() {
1048 1052 // We are about to decide on whether this pause will be an
1049 1053 // initial-mark pause.
1050 1054
1051 1055 // First, collector_state()->in_initial_mark_gc() should not be already set. We
1052 1056 // will set it here if we have to. However, it should be cleared by
1053 1057 // the end of the pause (it's only set for the duration of an
1054 1058 // initial-mark pause).
1055 1059 assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
1056 1060
1057 1061 if (collector_state()->initiate_conc_mark_if_possible()) {
1058 1062 // We had noticed on a previous pause that the heap occupancy has
1059 1063 // gone over the initiating threshold and we should start a
1060 1064 // concurrent marking cycle. So we might initiate one.
1061 1065
1062 1066 if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
1063 1067 // Initiate a new initial mark if there is no marking or reclamation going on.
1064 1068 initiate_conc_mark();
1065 1069 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
1066 1070 } else if (_g1h->is_user_requested_concurrent_full_gc(_g1h->gc_cause())) {
1067 1071 // Initiate a user requested initial mark. An initial mark must be young only
1068 1072 // GC, so the collector state must be updated to reflect this.
1069 1073 collector_state()->set_in_young_only_phase(true);
1070 1074 collector_state()->set_in_young_gc_before_mixed(false);
1071 1075
1072 1076 // We might have ended up coming here about to start a mixed phase with a collection set
1073 1077 // active. The following remark might change the "evacuation efficiency" of
1074 1078 // the regions in this set, leading to failing asserts later.
1075 1079 // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
1076 1080 clear_collection_set_candidates();
1077 1081 abort_time_to_mixed_tracking();
1078 1082 initiate_conc_mark();
1079 1083 log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
1080 1084 } else {
1081 1085 // The concurrent marking thread is still finishing up the
1082 1086 // previous cycle. If we start one right now the two cycles
1083 1087 // overlap. In particular, the concurrent marking thread might
1084 1088 // be in the process of clearing the next marking bitmap (which
1085 1089 // we will use for the next cycle if we start one). Starting a
1086 1090 // cycle now will be bad given that parts of the marking
1087 1091 // information might get cleared by the marking thread. And we
1088 1092 // cannot wait for the marking thread to finish the cycle as it
1089 1093 // periodically yields while clearing the next marking bitmap
1090 1094 // and, if it's in a yield point, it's waiting for us to
1091 1095 // finish. So, at this point we will not start a cycle and we'll
1092 1096 // let the concurrent marking thread complete the last one.
1093 1097 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1094 1098 }
1095 1099 }
1096 1100 }
1097 1101
1098 1102 void G1Policy::record_concurrent_mark_cleanup_end() {
1099 1103 G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
1100 1104 _collection_set->set_candidates(candidates);
1101 1105
1102 1106 bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
1103 1107 if (!mixed_gc_pending) {
1104 1108 clear_collection_set_candidates();
1105 1109 abort_time_to_mixed_tracking();
1106 1110 }
1107 1111 collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
1108 1112 collector_state()->set_mark_or_rebuild_in_progress(false);
1109 1113
1110 1114 double end_sec = os::elapsedTime();
1111 1115 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1112 1116 _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1113 1117 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1114 1118
1115 1119 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1116 1120 }
1117 1121
1118 1122 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
1119 1123 return percent_of(reclaimable_bytes, _g1h->capacity());
1120 1124 }
1121 1125
1122 1126 class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
1123 1127 virtual bool do_heap_region(HeapRegion* r) {
1124 1128 r->rem_set()->clear_locked(true /* only_cardset */);
1125 1129 return false;
1126 1130 }
1127 1131 };
1128 1132
1129 1133 void G1Policy::clear_collection_set_candidates() {
1130 1134 // Clear remembered sets of remaining candidate regions and the actual candidate
1131 1135 // set.
1132 1136 G1ClearCollectionSetCandidateRemSets cl;
1133 1137 _collection_set->candidates()->iterate(&cl);
1134 1138 _collection_set->clear_candidates();
1135 1139 }
1136 1140
1137 1141 void G1Policy::maybe_start_marking() {
1138 1142 if (need_to_start_conc_mark("end of GC")) {
1139 1143 // Note: this might have already been set, if during the last
1140 1144 // pause we decided to start a cycle but at the beginning of
1141 1145 // this pause we decided to postpone it. That's OK.
1142 1146 collector_state()->set_initiate_conc_mark_if_possible(true);
1143 1147 }
1144 1148 }
1145 1149
1146 1150 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
1147 1151 assert(!collector_state()->in_full_gc(), "must be");
1148 1152 if (collector_state()->in_initial_mark_gc()) {
1149 1153 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1150 1154 return InitialMarkGC;
1151 1155 } else if (collector_state()->in_young_gc_before_mixed()) {
1152 1156 assert(!collector_state()->in_initial_mark_gc(), "must be");
1153 1157 return LastYoungGC;
1154 1158 } else if (collector_state()->in_mixed_phase()) {
1155 1159 assert(!collector_state()->in_initial_mark_gc(), "must be");
1156 1160 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1157 1161 return MixedGC;
1158 1162 } else {
1159 1163 assert(!collector_state()->in_initial_mark_gc(), "must be");
1160 1164 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1161 1165 return YoungOnlyGC;
1162 1166 }
1163 1167 }
1164 1168
1165 1169 void G1Policy::record_pause(PauseKind kind, double start, double end) {
1166 1170 // Manage the MMU tracker. For some reason it ignores Full GCs.
1167 1171 if (kind != FullGC) {
1168 1172 _mmu_tracker->add_pause(start, end);
1169 1173 }
1170 1174 // Manage the mutator time tracking from initial mark to first mixed gc.
1171 1175 switch (kind) {
1172 1176 case FullGC:
1173 1177 abort_time_to_mixed_tracking();
1174 1178 break;
1175 1179 case Cleanup:
1176 1180 case Remark:
1177 1181 case YoungOnlyGC:
1178 1182 case LastYoungGC:
1179 1183 _initial_mark_to_mixed.add_pause(end - start);
1180 1184 break;
1181 1185 case InitialMarkGC:
1182 1186 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1183 1187 _initial_mark_to_mixed.record_initial_mark_end(end);
1184 1188 }
1185 1189 break;
1186 1190 case MixedGC:
1187 1191 _initial_mark_to_mixed.record_mixed_gc_start(start);
1188 1192 break;
1189 1193 default:
1190 1194 ShouldNotReachHere();
1191 1195 }
1192 1196 }
1193 1197
1194 1198 void G1Policy::abort_time_to_mixed_tracking() {
1195 1199 _initial_mark_to_mixed.reset();
1196 1200 }
1197 1201
1198 1202 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1199 1203 const char* false_action_str) const {
1200 1204 G1CollectionSetCandidates* candidates = _collection_set->candidates();
1201 1205
1202 1206 if (candidates->is_empty()) {
1203 1207 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1204 1208 return false;
1205 1209 }
1206 1210
1207 1211 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1208 1212 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1209 1213 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1210 1214 double threshold = (double) G1HeapWastePercent;
1211 1215 if (reclaimable_percent <= threshold) {
1212 1216 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1213 1217 false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1214 1218 return false;
1215 1219 }
1216 1220 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1217 1221 true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1218 1222 return true;
1219 1223 }
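
For a rough sense of the waste threshold above, assume an 8 GiB heap and the default
G1HeapWastePercent of 5; the numbers below are illustrative only and not taken from the patch:

    double capacity_mb     = 8192.0;                                // 8 GiB heap
    double reclaimable_mb  = 600.0;                                 // space left in candidate regions
    double reclaimable_pct = 100.0 * reclaimable_mb / capacity_mb;  // ~7.3%
    bool keep_mixing       = reclaimable_pct > 5.0;                 // above threshold -> request mixed GCs

Mixed collections stop being requested once the remaining reclaimable space in the
candidates drops to 5% of capacity, here roughly 410 MiB.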
1220 1224
1221 1225 uint G1Policy::calc_min_old_cset_length() const {
1222 1226 // The min old CSet region bound is based on the maximum desired
1223 1227 // number of mixed GCs after a cycle. I.e., even if some old regions
1224 1228 // look expensive, we should add them to the CSet anyway to make
1225 1229 // sure we go through the available old regions in no more than the
1226 1230 // maximum desired number of mixed GCs.
1227 1231 //
1228 1232 // The calculation is based on the number of marked regions we added
1229 1233 // to the CSet candidates in the first place, not how many remain, so
1230 1234 // that the result is the same during all mixed GCs that follow a cycle.
1231 1235
1232 1236 const size_t region_num = _collection_set->candidates()->num_regions();
1233 1237 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1234 1238 size_t result = region_num / gc_num;
1235 1239 // emulate ceiling
1236 1240 if (result * gc_num < region_num) {
1237 1241 result += 1;
1238 1242 }
1239 1243 return (uint) result;
1240 1244 }
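
As a worked instance of the ceiling division above (values assumed for illustration):
with 100 candidate regions recorded at the end of marking and the default
G1MixedGCCountTarget of 8,

    size_t region_num = 100;                   // candidates at end of marking
    size_t gc_num     = 8;                     // G1MixedGCCountTarget default
    size_t result     = region_num / gc_num;   // 12
    if (result * gc_num < region_num) {        // 96 < 100
      result += 1;                             // -> 13
    }

so each mixed GC takes at least 13 old regions and the whole candidate set is consumed
within the targeted 8 mixed collections.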
1241 1245
1242 1246 uint G1Policy::calc_max_old_cset_length() const {
1243 1247 // The max old CSet region bound is based on the threshold expressed
1244 1248 // as a percentage of the heap size. I.e., it should bound the
1245 1249 // number of old regions added to the CSet irrespective of how many
1246 1250 // of them are available.
1247 1251
1248 1252 const G1CollectedHeap* g1h = G1CollectedHeap::heap();
1249 1253 const size_t region_num = g1h->num_regions();
1250 1254 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1251 1255 size_t result = region_num * perc / 100;
1252 1256 // emulate ceiling
1253 1257 if (100 * result < region_num * perc) {
1254 1258 result += 1;
1255 1259 }
1256 1260 return (uint) result;
1257 1261 }
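
Likewise for the upper bound (values assumed for illustration): with 2048 heap regions
and the default G1OldCSetRegionThresholdPercent of 10,

    size_t region_num = 2048;                     // total regions in the heap
    size_t perc       = 10;                       // G1OldCSetRegionThresholdPercent default
    size_t result     = region_num * perc / 100;  // 204
    if (100 * result < region_num * perc) {       // 20400 < 20480
      result += 1;                                // -> 205
    }

capping any single collection at 205 old regions no matter how many candidates remain.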
1258 1262
1259 1263 void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
1260 1264 double time_remaining_ms,
1261 1265 uint& num_initial_regions,
1262 1266 uint& num_optional_regions) {
1263 1267 assert(candidates != NULL, "Must be");
1264 1268
1265 1269 num_initial_regions = 0;
1266 1270 num_optional_regions = 0;
1267 1271 uint num_expensive_regions = 0;
1268 1272
1269 1273 double predicted_old_time_ms = 0.0;
1270 1274 double predicted_initial_time_ms = 0.0;
1271 1275 double predicted_optional_time_ms = 0.0;
1272 1276
1273 1277 double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();
1274 1278
1275 1279 const uint min_old_cset_length = calc_min_old_cset_length();
1276 1280 const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
1277 1281 const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
1278 1282 bool check_time_remaining = use_adaptive_young_list_length();
1279 1283
1280 1284 uint candidate_idx = candidates->cur_idx();
1281 1285
1282 1286 log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
1283 1287 "time remaining %1.2fms, optional threshold %1.2fms",
1284 1288 min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
1285 1289
1286 1290 HeapRegion* hr = candidates->at(candidate_idx);
1287 1291 while (hr != NULL) {
1288 1292 if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
1289 1293 // Added maximum number of old regions to the CSet.
1290 1294 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
1291 1295 "Initial %u regions, optional %u regions",
1292 1296 num_initial_regions, num_optional_regions);
1293 1297 break;
1294 1298 }
1295 1299
1296 1300 // Stop adding regions if the remaining reclaimable space is
1297 1301 // not above G1HeapWastePercent.
1298 1302 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1299 1303 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1300 1304 double threshold = (double) G1HeapWastePercent;
1301 1305 if (reclaimable_percent <= threshold) {
1302 1306 // We've added enough old regions that the amount of uncollected
1303 1307 // reclaimable space is at or below the waste threshold. Stop
1304 1308 // adding old regions to the CSet.
1305 1309 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
1306 1310 "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
1307 1311 byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
1308 1312 reclaimable_percent, G1HeapWastePercent);
1309 1313 break;
1310 1314 }
1311 1315
1312 1316 double predicted_time_ms = predict_region_total_time_ms(hr, false);
1313 1317 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
1314 1318 // Add regions to old set until we reach the minimum amount
1315 1319 if (num_initial_regions < min_old_cset_length) {
1316 1320 predicted_old_time_ms += predicted_time_ms;
1317 1321 num_initial_regions++;
1318 1322 // Record the number of regions added with no time remaining
1319 1323 if (time_remaining_ms == 0.0) {
1320 1324 num_expensive_regions++;
1321 1325 }
1322 1326 } else if (!check_time_remaining) {
1323 1327 // In the non-auto-tuning case, we'll finish adding regions
1324 1328 // to the CSet if we reach the minimum.
1325 1329 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
1326 1330 break;
1327 1331 } else {
1328 1332 // Keep adding regions to old set until we reach the optional threshold
1329 1333 if (time_remaining_ms > optional_threshold_ms) {
1330 1334 predicted_old_time_ms += predicted_time_ms;
1331 1335 num_initial_regions++;
1332 1336 } else if (time_remaining_ms > 0) {
1333 1337 // Keep adding optional regions until time is up.
1334 1338 assert(num_optional_regions < max_optional_regions, "Should not be possible.");
1335 1339 predicted_optional_time_ms += predicted_time_ms;
1336 1340 num_optional_regions++;
1337 1341 } else {
1338 1342 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
1339 1343 break;
1340 1344 }
1341 1345 }
1342 1346 hr = candidates->at(++candidate_idx);
1343 1347 }
1344 1348 if (hr == NULL) {
1345 1349 log_debug(gc, ergo, cset)("Old candidate collection set empty.");
1346 1350 }
1347 1351
1348 1352 if (num_expensive_regions > 0) {
1349 1353 log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
1350 1354 num_expensive_regions);
1351 1355 }
1352 1356
1353 1357 log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
1354 1358 "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
1355 1359 num_initial_regions, num_optional_regions,
1356 1360 predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
1357 1361 }
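
To make the selection loop above concrete, here is a rough trace with assumed inputs
(a 50 ms budget, a 10 ms optional threshold, min 13 / max 205 old regions, adaptive young
list sizing, and a flat 2 ms prediction per region; none of these values come from the patch):

    // Regions  1-13: taken as initial to satisfy the minimum; budget falls to 24 ms.
    // Regions 14-19: taken as initial while the budget stays above the 10 ms threshold.
    // Regions 20-24: taken as optional while any budget remains (12 ms down to 2 ms).
    // Region     25: its 2 ms prediction exhausts the budget, so selection stops with
    //                19 initial and 5 optional regions.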
1358 1362
1359 1363 void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
1360 1364 uint const max_optional_regions,
1361 1365 double time_remaining_ms,
1362 1366 uint& num_optional_regions) {
1363 1367 assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");
1364 1368
1365 1369 num_optional_regions = 0;
1366 1370 double prediction_ms = 0;
1367 1371 uint candidate_idx = candidates->cur_idx();
1368 1372
1369 1373 HeapRegion* r = candidates->at(candidate_idx);
1370 1374 while (num_optional_regions < max_optional_regions) {
1371 1375 assert(r != NULL, "Region must exist");
1372 1376 prediction_ms += predict_region_total_time_ms(r, false);
1373 1377
1374 1378 if (prediction_ms > time_remaining_ms) {
1375 1379 log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
1376 1380 prediction_ms, r->hrm_index(), time_remaining_ms);
1377 1381 break;
1378 1382 }
1379 1383 // This region will be included in the next optional evacuation.
1380 1384
1381 1385 time_remaining_ms -= prediction_ms;
1382 1386 num_optional_regions++;
1383 1387 r = candidates->at(++candidate_idx);
1384 1388 }
1385 1389
1386 1390 log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
1387 1391 num_optional_regions, max_optional_regions, prediction_ms);
1388 1392 }
1389 1393
1390 1394 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
1391 1395 note_start_adding_survivor_regions();
1392 1396
1393 1397 HeapRegion* last = NULL;
1394 1398 for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
1395 1399 it != survivors->regions()->end();
1396 1400 ++it) {
1397 1401 HeapRegion* curr = *it;
1398 1402 set_region_survivor(curr);
1399 1403
1400 1404 // The region is a non-empty survivor so let's add it to
1401 1405 // the incremental collection set for the next evacuation
1402 1406 // pause.
1403 1407 _collection_set->add_survivor_regions(curr);
1404 1408
1405 1409 last = curr;
1406 1410 }
1407 1411 note_stop_adding_survivor_regions();
1408 1412
1409 1413 // Don't clear the survivor list handles until the start of
1410 1414 // the next evacuation pause - we need it in order to re-tag
1411 1415 // the survivor regions from this evacuation pause as 'young'
1412 1416 // at the start of the next.
1417 +}
1418 +
1419 +size_t G1Policy::minimum_desired_bytes_after_concurrent_mark(size_t used_bytes) {
1420 + size_t minimum_desired_buffer_size = _ihop_control->predict_unstrained_buffer_size();
1421 + return minimum_desired_buffer_size != 0 ?
1422 + minimum_desired_buffer_size : _young_list_max_length * HeapRegion::GrainBytes
1423 + + _reserve_regions * HeapRegion::GrainBytes + used_bytes;
1413 1424 }
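
The new helper either trusts the IHOP control's unstrained buffer prediction or, when that
prediction is still zero, falls back to the young list cap plus the reserve plus the bytes
currently used. A rough worked example of the fallback path, with all inputs assumed for
illustration (4 MiB regions, a 200-region young cap, 20 reserve regions, 1 GiB used):

    size_t grain_bytes = 4u * 1024 * 1024;            // assumed HeapRegion::GrainBytes
    size_t young_max   = 200;                         // assumed _young_list_max_length
    size_t reserve     = 20;                          // assumed _reserve_regions
    size_t used_bytes  = 1024u * 1024 * 1024;         // assumed used bytes (1 GiB)
    size_t minimum_desired = young_max * grain_bytes  //    800 MiB
                           + reserve * grain_bytes    // +   80 MiB
                           + used_bytes;              // + 1024 MiB = 1904 MiB

When predict_unstrained_buffer_size() returns a non-zero value, that value is returned
directly and this fallback sum is not used.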