45 #include "logging/logStream.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/mutexLocker.hpp"
49 #include "utilities/debug.hpp"
50 #include "utilities/growableArray.hpp"
51 #include "utilities/pair.hpp"
52
53 G1Policy::G1Policy(STWGCTimer* gc_timer) :
54 _predictor(G1ConfidencePercent / 100.0),
55 _analytics(new G1Analytics(&_predictor)),
56 _remset_tracker(),
57 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
58 _ihop_control(create_ihop_control(&_predictor)),
59 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
60 _full_collection_start_sec(0.0),
61 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
62 _young_list_target_length(0),
63 _young_list_fixed_length(0),
64 _young_list_max_length(0),
65 _short_lived_surv_rate_group(new SurvRateGroup()),
66 _survivor_surv_rate_group(new SurvRateGroup()),
67 _reserve_factor((double) G1ReservePercent / 100.0),
68 _reserve_regions(0),
69 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
70 _free_regions_at_end_of_collection(0),
71 _rs_length(0),
72 _rs_length_prediction(0),
73 _pending_cards_at_gc_start(0),
74 _pending_cards_at_prev_gc_end(0),
75 _total_mutator_refined_cards(0),
76 _total_concurrent_refined_cards(0),
77 _total_concurrent_refinement_time(),
78 _bytes_allocated_in_old_since_last_gc(0),
79 _initial_mark_to_mixed(),
80 _collection_set(NULL),
81 _g1h(NULL),
82 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
83 _mark_remark_start_sec(0),
84 _mark_cleanup_start_sec(0),
85 _tenuring_threshold(MaxTenuringThreshold),
110 assert(Heap_lock->owned_by_self(), "Locking discipline.");
111
112 if (!use_adaptive_young_list_length()) {
113 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
114 }
115 _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
116
117 _free_regions_at_end_of_collection = _g1h->num_free_regions();
118
119 update_young_list_max_and_target_length();
120 // We may immediately start allocating regions and placing them on the
121 // collection set list. Initialize the per-collection set info
122 _collection_set->start_incremental_building();
123 }
124
125 void G1Policy::note_gc_start() {
126 phase_times()->note_gc_start();
127 }
128
129 class G1YoungLengthPredictor {
130 const bool _during_cm;
131 const double _base_time_ms;
132 const double _base_free_regions;
133 const double _target_pause_time_ms;
134 const G1Policy* const _policy;
135
136 public:
137 G1YoungLengthPredictor(bool during_cm,
138 double base_time_ms,
139 double base_free_regions,
140 double target_pause_time_ms,
141 const G1Policy* policy) :
142 _during_cm(during_cm),
143 _base_time_ms(base_time_ms),
144 _base_free_regions(base_free_regions),
145 _target_pause_time_ms(target_pause_time_ms),
146 _policy(policy) {}
147
148 bool will_fit(uint young_length) const {
149 if (young_length >= _base_free_regions) {
150 // end condition 1: not enough space for the young regions
151 return false;
152 }
153
154 const double accum_surv_rate = _policy->accum_yg_surv_rate_pred((int) young_length - 1);
155 const size_t bytes_to_copy =
156 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
157 const double copy_time_ms =
158 _policy->analytics()->predict_object_copy_time_ms(bytes_to_copy, _during_cm);
159 const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
160 const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
161 if (pause_time_ms > _target_pause_time_ms) {
162 // end condition 2: prediction is over the target pause time
163 return false;
164 }
165
166 const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;
167
168 // When copying, we will likely need more bytes free than is live in the region.
169 // Add some safety margin to factor in the confidence of our guess, and the
170 // natural expected waste.
171 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
172 // of the calculation: the lower the confidence, the more headroom.
173 // (100 + TargetPLABWastePct) represents the increase in expected bytes during
174 // copying due to anticipated waste in the PLABs.
175 const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
176 const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
177
178 if (expected_bytes_to_copy > free_bytes) {
286 }
287
288 // Make sure we don't go over the desired max length, nor under the
289 // desired min length. In case they clash, desired_min_length wins
290 // which is why that test is second.
291 if (young_list_target_length > desired_max_length) {
292 young_list_target_length = desired_max_length;
293 }
294 if (young_list_target_length < desired_min_length) {
295 young_list_target_length = desired_min_length;
296 }
297
298 assert(young_list_target_length > base_min_length,
299 "we should be able to allocate at least one eden region");
300 assert(young_list_target_length >= absolute_min_length, "post-condition");
301
302 result.first = young_list_target_length;
303 return result;
304 }
305
306 uint
307 G1Policy::calculate_young_list_target_length(size_t rs_length,
308 uint base_min_length,
309 uint desired_min_length,
310 uint desired_max_length) const {
311 assert(use_adaptive_young_list_length(), "pre-condition");
312 assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
313
314 // In case some edge-condition makes the desired max length too small...
315 if (desired_max_length <= desired_min_length) {
316 return desired_min_length;
317 }
318
319 // We'll adjust min_young_length and max_young_length not to include
320 // the already allocated young regions (i.e., so they reflect the
321 // min and max eden regions we'll allocate). The base_min_length
322 // will be reflected in the predictions by the
323 // survivor_regions_evac_time prediction.
324 assert(desired_min_length > base_min_length, "invariant");
325 uint min_young_length = desired_min_length - base_min_length;
326 assert(desired_max_length > base_min_length, "invariant");
327 uint max_young_length = desired_max_length - base_min_length;
328
329 const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
330 const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
331 const size_t pending_cards = _analytics->predict_pending_cards();
332 const double base_time_ms =
333 predict_base_elapsed_time_ms(pending_cards, rs_length) +
334 survivor_regions_evac_time;
335 const uint available_free_regions = _free_regions_at_end_of_collection;
336 const uint base_free_regions =
337 available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
338
339 // Here, we will make sure that the shortest young length that
340 // makes sense fits within the target pause time.
341
342 G1YoungLengthPredictor p(collector_state()->mark_or_rebuild_in_progress(),
343 base_time_ms,
344 base_free_regions,
345 target_pause_time_ms,
346 this);
347 if (p.will_fit(min_young_length)) {
348 // The shortest young length will fit into the target pause time;
349 // we'll now check whether the absolute maximum number of young
350 // regions will fit in the target pause time. If not, we'll do
351 // a binary search between min_young_length and max_young_length.
352 if (p.will_fit(max_young_length)) {
353 // The maximum young length will fit into the target pause time.
354 // We are done so set min young length to the maximum length (as
389 assert(min_young_length < max_young_length,
390 "otherwise we should have discovered that max_young_length "
391 "fits into the pause target and not done the binary search");
392 assert(p.will_fit(min_young_length),
393 "min_young_length, the result of the binary search, should "
394 "fit into the pause target");
395 assert(!p.will_fit(min_young_length + 1),
396 "min_young_length, the result of the binary search, should be "
397 "optimal, so no larger length should fit into the pause target");
398 }
399 } else {
400 // Even the minimum length doesn't fit into the pause time
401 // target, return it as the result nevertheless.
402 }
403 return base_min_length + min_young_length;
404 }
405
406 double G1Policy::predict_survivor_regions_evac_time() const {
407 double survivor_regions_evac_time = 0.0;
408 const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
409
410 for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
411 it != survivor_regions->end();
412 ++it) {
413 survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase());
414 }
415 return survivor_regions_evac_time;
416 }
417
418 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
419 guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
420
421 if (rs_length > _rs_length_prediction) {
422 // add 10% to avoid having to recalculate often
423 size_t rs_length_prediction = rs_length * 1100 / 1000;
424 update_rs_length_prediction(rs_length_prediction);
425
426 update_young_list_max_and_target_length(rs_length_prediction);
427 }
428 }
429
430 void G1Policy::update_rs_length_prediction() {
431 update_rs_length_prediction(_analytics->predict_rs_length());
432 }
433
449 void G1Policy::record_full_collection_end() {
450 // Consider this like a collection pause for the purposes of allocation
451 // since last pause.
452 double end_sec = os::elapsedTime();
453 double full_gc_time_sec = end_sec - _full_collection_start_sec;
454 double full_gc_time_ms = full_gc_time_sec * 1000.0;
455
456 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
457
458 collector_state()->set_in_full_gc(false);
459
460 // "Nuke" the heuristics that control the young/mixed GC
461 // transitions and make sure we start with young GCs after the Full GC.
462 collector_state()->set_in_young_only_phase(true);
463 collector_state()->set_in_young_gc_before_mixed(false);
464 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
465 collector_state()->set_in_initial_mark_gc(false);
466 collector_state()->set_mark_or_rebuild_in_progress(false);
467 collector_state()->set_clearing_next_bitmap(false);
468
469 _short_lived_surv_rate_group->start_adding_regions();
470 // also call this on any additional surv rate groups
471
472 _free_regions_at_end_of_collection = _g1h->num_free_regions();
473 // Reset survivors SurvRateGroup.
474 _survivor_surv_rate_group->reset();
475 update_young_list_max_and_target_length();
476 update_rs_length_prediction();
477 _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
478
479 _bytes_allocated_in_old_since_last_gc = 0;
480
481 record_pause(FullGC, _full_collection_start_sec, end_sec);
482 }
483
484 void G1Policy::record_concurrent_refinement_data(bool is_full_collection) {
485 _pending_cards_at_gc_start = _g1h->pending_card_num();
486
487 // Record info about concurrent refinement thread processing.
488 G1ConcurrentRefine* cr = _g1h->concurrent_refine();
489 G1ConcurrentRefine::RefinementStats cr_stats = cr->total_refinement_stats();
536 }
537
// Bookkeeping at the start of an incremental collection pause: refreshes the
// survivor policy, records refinement statistics and timestamps, and prepares
// the collection set and survivor-rate tracking for the upcoming pause.
void G1Policy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
         "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
         max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
  assert_used_and_recalculate_used_equal(_g1h);

  phase_times()->record_cur_collection_start_sec(start_time_sec);

  // Snapshot concurrent refinement statistics before the pause mutates them.
  record_concurrent_refinement_data(false /* is_full_collection */);

  _collection_set->reset_bytes_used_before();

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
}
561
// Marks the end of concurrent-mark initialization: the pause is no longer an
// initial-mark GC.
// NOTE(review): the elapsed-time argument is currently unused in this body.
void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_initial_mark_gc(false);
}
566
// Timestamp the start of the concurrent-mark remark pause; the matching
// *_end() uses it to compute the remark duration.
void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}
570
571 void G1Policy::record_concurrent_mark_remark_end() {
572 double end_time_sec = os::elapsedTime();
573 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
574 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
575 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
576
694
695 if (collector_state()->in_young_gc_before_mixed()) {
696 assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
697 // This has been the young GC before we start doing mixed GCs. We already
698 // decided to start mixed GCs much earlier, so there is nothing to do except
699 // advancing the state.
700 collector_state()->set_in_young_only_phase(false);
701 collector_state()->set_in_young_gc_before_mixed(false);
702 } else if (!this_pause_was_young_only) {
703 // This is a mixed GC. Here we decide whether to continue doing more
704 // mixed GCs or not.
705 if (!next_gc_should_be_mixed("continue mixed GCs",
706 "do not continue mixed GCs")) {
707 collector_state()->set_in_young_only_phase(true);
708
709 clear_collection_set_candidates();
710 maybe_start_marking();
711 }
712 }
713
714 _short_lived_surv_rate_group->start_adding_regions();
715
716 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
717 if (update_stats) {
718 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
719 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
720 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
721 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
722 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
723 total_log_buffer_cards;
724
725 if (total_cards_merged > 10) {
726 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
727 average_time_ms(G1GCPhaseTimes::MergeRS) +
728 average_time_ms(G1GCPhaseTimes::MergeHCC) +
729 average_time_ms(G1GCPhaseTimes::MergeLB) +
730 average_time_ms(G1GCPhaseTimes::OptMergeRS);
731 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
732 }
733
734 // Update prediction for card scan
890 // marking, which makes any prediction useless. This increases the accuracy of the
891 // prediction.
892 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
893 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
894 report = true;
895 }
896
897 if (report) {
898 report_ihop_statistics();
899 }
900 }
901
// Prints the current IHOP (Initiating Heap Occupancy Percent) control state.
void G1Policy::report_ihop_statistics() {
  _ihop_control->print();
}
905
906 void G1Policy::print_phases() {
907 phase_times()->print();
908 }
909
910 double G1Policy::accum_yg_surv_rate_pred(int age) const {
911 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
912 }
913
914 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
915 size_t rs_length) const {
916 size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
917 return
918 _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
919 _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
920 _analytics->predict_constant_other_time_ms();
921 }
922
923 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
924 size_t rs_length = _analytics->predict_rs_length();
925 return predict_base_elapsed_time_ms(pending_cards, rs_length);
926 }
927
928 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
929 size_t bytes_to_copy;
930 if (!hr->is_young()) {
931 bytes_to_copy = hr->max_live_bytes();
932 } else {
933 bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));
934 }
935 return bytes_to_copy;
936 }
937
938 double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr,
939 bool for_young_gc) const {
940 size_t rs_length = hr->rem_set()->occupied();
941 size_t scan_card_num = _analytics->predict_scan_card_num(rs_length, for_young_gc);
942
943 size_t bytes_to_copy = predict_bytes_to_copy(hr);
944
945 double region_elapsed_time_ms =
946 _analytics->predict_card_merge_time_ms(rs_length, collector_state()->in_young_only_phase()) +
947 _analytics->predict_card_scan_time_ms(scan_card_num, collector_state()->in_young_only_phase()) +
948 _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
949
950 // The prediction of the "other" time for this region is based
951 // upon the region type and NOT the GC type.
952 if (hr->is_young()) {
953 region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
954 } else {
955 region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
956 }
957 return region_elapsed_time_ms;
958 }
959
960 bool G1Policy::should_allocate_mutator_region() const {
961 uint young_list_length = _g1h->young_regions_count();
962 uint young_list_target_length = _young_list_target_length;
963 return young_list_length < young_list_target_length;
964 }
965
966 bool G1Policy::can_expand_young_list() const {
967 uint young_list_length = _g1h->young_regions_count();
968 uint young_list_max_length = _young_list_max_length;
969 return young_list_length < young_list_max_length;
970 }
971
// Whether the young list length is sized adaptively; the young gen sizer
// owns that decision.
bool G1Policy::use_adaptive_young_list_length() const {
  return _young_gen_sizer->use_adaptive_young_list_length();
}
975
976 size_t G1Policy::desired_survivor_size(uint max_regions) const {
977 size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
978 return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
979 }
1287 num_initial_regions, num_optional_regions);
1288 break;
1289 }
1290
1291 // Stop adding regions if the remaining reclaimable space is
1292 // not above G1HeapWastePercent.
1293 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1294 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1295 double threshold = (double) G1HeapWastePercent;
1296 if (reclaimable_percent <= threshold) {
1297 // We've added enough old regions that the amount of uncollected
1298 // reclaimable space is at or below the waste threshold. Stop
1299 // adding old regions to the CSet.
1300 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
1301 "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
1302 byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
1303 reclaimable_percent, G1HeapWastePercent);
1304 break;
1305 }
1306
1307 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
1308 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
1309 // Add regions to old set until we reach the minimum amount
1310 if (num_initial_regions < min_old_cset_length) {
1311 predicted_old_time_ms += predicted_time_ms;
1312 num_initial_regions++;
1313 // Record the number of regions added with no time remaining
1314 if (time_remaining_ms == 0.0) {
1315 num_expensive_regions++;
1316 }
1317 } else if (!check_time_remaining) {
1318 // In the non-auto-tuning case, we'll finish adding regions
1319 // to the CSet if we reach the minimum.
1320 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
1321 break;
1322 } else {
1323 // Keep adding regions to old set until we reach the optional threshold
1324 if (time_remaining_ms > optional_threshold_ms) {
1325 predicted_old_time_ms += predicted_time_ms;
1326 num_initial_regions++;
1327 } else if (time_remaining_ms > 0) {
1347
1348 log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
1349 "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
1350 num_initial_regions, num_optional_regions,
1351 predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
1352 }
1353
1354 void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
1355 uint const max_optional_regions,
1356 double time_remaining_ms,
1357 uint& num_optional_regions) {
1358 assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");
1359
1360 num_optional_regions = 0;
1361 double prediction_ms = 0;
1362 uint candidate_idx = candidates->cur_idx();
1363
1364 HeapRegion* r = candidates->at(candidate_idx);
1365 while (num_optional_regions < max_optional_regions) {
1366 assert(r != NULL, "Region must exist");
1367 prediction_ms += predict_region_elapsed_time_ms(r, false);
1368
1369 if (prediction_ms > time_remaining_ms) {
1370 log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
1371 prediction_ms, r->hrm_index(), time_remaining_ms);
1372 break;
1373 }
1374 // This region will be included in the next optional evacuation.
1375
1376 time_remaining_ms -= prediction_ms;
1377 num_optional_regions++;
1378 r = candidates->at(++candidate_idx);
1379 }
1380
1381 log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
1382 num_optional_regions, max_optional_regions, prediction_ms);
1383 }
1384
1385 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
1386
1387 // Add survivor regions to SurvRateGroup.
|
45 #include "logging/logStream.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/mutexLocker.hpp"
49 #include "utilities/debug.hpp"
50 #include "utilities/growableArray.hpp"
51 #include "utilities/pair.hpp"
52
53 G1Policy::G1Policy(STWGCTimer* gc_timer) :
54 _predictor(G1ConfidencePercent / 100.0),
55 _analytics(new G1Analytics(&_predictor)),
56 _remset_tracker(),
57 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
58 _ihop_control(create_ihop_control(&_predictor)),
59 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
60 _full_collection_start_sec(0.0),
61 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
62 _young_list_target_length(0),
63 _young_list_fixed_length(0),
64 _young_list_max_length(0),
65 _eden_surv_rate_group(new SurvRateGroup()),
66 _survivor_surv_rate_group(new SurvRateGroup()),
67 _reserve_factor((double) G1ReservePercent / 100.0),
68 _reserve_regions(0),
69 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
70 _free_regions_at_end_of_collection(0),
71 _rs_length(0),
72 _rs_length_prediction(0),
73 _pending_cards_at_gc_start(0),
74 _pending_cards_at_prev_gc_end(0),
75 _total_mutator_refined_cards(0),
76 _total_concurrent_refined_cards(0),
77 _total_concurrent_refinement_time(),
78 _bytes_allocated_in_old_since_last_gc(0),
79 _initial_mark_to_mixed(),
80 _collection_set(NULL),
81 _g1h(NULL),
82 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
83 _mark_remark_start_sec(0),
84 _mark_cleanup_start_sec(0),
85 _tenuring_threshold(MaxTenuringThreshold),
110 assert(Heap_lock->owned_by_self(), "Locking discipline.");
111
112 if (!use_adaptive_young_list_length()) {
113 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
114 }
115 _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
116
117 _free_regions_at_end_of_collection = _g1h->num_free_regions();
118
119 update_young_list_max_and_target_length();
120 // We may immediately start allocating regions and placing them on the
121 // collection set list. Initialize the per-collection set info
122 _collection_set->start_incremental_building();
123 }
124
125 void G1Policy::note_gc_start() {
126 phase_times()->note_gc_start();
127 }
128
129 class G1YoungLengthPredictor {
130 const double _base_time_ms;
131 const double _base_free_regions;
132 const double _target_pause_time_ms;
133 const G1Policy* const _policy;
134
135 public:
136 G1YoungLengthPredictor(bool during_cm,
137 double base_time_ms,
138 double base_free_regions,
139 double target_pause_time_ms,
140 const G1Policy* policy) :
141 _base_time_ms(base_time_ms),
142 _base_free_regions(base_free_regions),
143 _target_pause_time_ms(target_pause_time_ms),
144 _policy(policy) {}
145
146 bool will_fit(uint young_length) const {
147 if (young_length >= _base_free_regions) {
148 // end condition 1: not enough space for the young regions
149 return false;
150 }
151
152 size_t bytes_to_copy = 0;
153 const double copy_time_ms = _policy->predict_eden_copy_time_ms(young_length, &bytes_to_copy);
154 const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
155 const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
156 if (pause_time_ms > _target_pause_time_ms) {
157 // end condition 2: prediction is over the target pause time
158 return false;
159 }
160
161 const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;
162
163 // When copying, we will likely need more bytes free than is live in the region.
164 // Add some safety margin to factor in the confidence of our guess, and the
165 // natural expected waste.
166 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
167 // of the calculation: the lower the confidence, the more headroom.
168 // (100 + TargetPLABWastePct) represents the increase in expected bytes during
169 // copying due to anticipated waste in the PLABs.
170 const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
171 const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
172
173 if (expected_bytes_to_copy > free_bytes) {
281 }
282
283 // Make sure we don't go over the desired max length, nor under the
284 // desired min length. In case they clash, desired_min_length wins
285 // which is why that test is second.
286 if (young_list_target_length > desired_max_length) {
287 young_list_target_length = desired_max_length;
288 }
289 if (young_list_target_length < desired_min_length) {
290 young_list_target_length = desired_min_length;
291 }
292
293 assert(young_list_target_length > base_min_length,
294 "we should be able to allocate at least one eden region");
295 assert(young_list_target_length >= absolute_min_length, "post-condition");
296
297 result.first = young_list_target_length;
298 return result;
299 }
300
301 uint G1Policy::calculate_young_list_target_length(size_t rs_length,
302 uint base_min_length,
303 uint desired_min_length,
304 uint desired_max_length) const {
305 assert(use_adaptive_young_list_length(), "pre-condition");
306 assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
307
308 // In case some edge-condition makes the desired max length too small...
309 if (desired_max_length <= desired_min_length) {
310 return desired_min_length;
311 }
312
313 // We'll adjust min_young_length and max_young_length not to include
314 // the already allocated young regions (i.e., so they reflect the
315 // min and max eden regions we'll allocate). The base_min_length
316 // will be reflected in the predictions by the
317 // survivor_regions_evac_time prediction.
318 assert(desired_min_length > base_min_length, "invariant");
319 uint min_young_length = desired_min_length - base_min_length;
320 assert(desired_max_length > base_min_length, "invariant");
321 uint max_young_length = desired_max_length - base_min_length;
322
323 const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
324 const size_t pending_cards = _analytics->predict_pending_cards();
325 const double base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);
326 const uint available_free_regions = _free_regions_at_end_of_collection;
327 const uint base_free_regions =
328 available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
329
330 // Here, we will make sure that the shortest young length that
331 // makes sense fits within the target pause time.
332
333 G1YoungLengthPredictor p(collector_state()->mark_or_rebuild_in_progress(),
334 base_time_ms,
335 base_free_regions,
336 target_pause_time_ms,
337 this);
338 if (p.will_fit(min_young_length)) {
339 // The shortest young length will fit into the target pause time;
340 // we'll now check whether the absolute maximum number of young
341 // regions will fit in the target pause time. If not, we'll do
342 // a binary search between min_young_length and max_young_length.
343 if (p.will_fit(max_young_length)) {
344 // The maximum young length will fit into the target pause time.
345 // We are done so set min young length to the maximum length (as
380 assert(min_young_length < max_young_length,
381 "otherwise we should have discovered that max_young_length "
382 "fits into the pause target and not done the binary search");
383 assert(p.will_fit(min_young_length),
384 "min_young_length, the result of the binary search, should "
385 "fit into the pause target");
386 assert(!p.will_fit(min_young_length + 1),
387 "min_young_length, the result of the binary search, should be "
388 "optimal, so no larger length should fit into the pause target");
389 }
390 } else {
391 // Even the minimum length doesn't fit into the pause time
392 // target, return it as the result nevertheless.
393 }
394 return base_min_length + min_young_length;
395 }
396
397 double G1Policy::predict_survivor_regions_evac_time() const {
398 double survivor_regions_evac_time = 0.0;
399 const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
400 for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
401 it != survivor_regions->end();
402 ++it) {
403 survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
404 }
405 return survivor_regions_evac_time;
406 }
407
408 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
409 guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
410
411 if (rs_length > _rs_length_prediction) {
412 // add 10% to avoid having to recalculate often
413 size_t rs_length_prediction = rs_length * 1100 / 1000;
414 update_rs_length_prediction(rs_length_prediction);
415
416 update_young_list_max_and_target_length(rs_length_prediction);
417 }
418 }
419
420 void G1Policy::update_rs_length_prediction() {
421 update_rs_length_prediction(_analytics->predict_rs_length());
422 }
423
// Record the end of a Full (stop-the-world, whole-heap) collection and reset
// the policy's heuristic state so subsequent pauses start from a clean slate.
void G1Policy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  // Feed the pause duration into the GC time history used for predictions.
  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_in_full_gc(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_in_young_only_phase(true);
  collector_state()->set_in_young_gc_before_mixed(false);
  // Re-evaluate whether concurrent marking should be initiated now that the
  // Full GC has changed heap occupancy.
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_in_initial_mark_gc(false);
  collector_state()->set_mark_or_rebuild_in_progress(false);
  collector_state()->set_clearing_next_bitmap(false);

  _eden_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  _free_regions_at_end_of_collection = _g1h->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  // Recompute young-gen sizing and the remembered set length prediction with
  // the post-Full-GC free region count.
  update_young_list_max_and_target_length();
  update_rs_length_prediction();
  _pending_cards_at_prev_gc_end = _g1h->pending_card_num();

  // Old-gen allocation tracking restarts after a Full GC.
  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}
473
474 void G1Policy::record_concurrent_refinement_data(bool is_full_collection) {
475 _pending_cards_at_gc_start = _g1h->pending_card_num();
476
477 // Record info about concurrent refinement thread processing.
478 G1ConcurrentRefine* cr = _g1h->concurrent_refine();
479 G1ConcurrentRefine::RefinementStats cr_stats = cr->total_refinement_stats();
526 }
527
// Record the start of an evacuation (young/mixed) pause and prepare the
// policy's per-pause bookkeeping.
void G1Policy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
         "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
         max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
  assert_used_and_recalculate_used_equal(_g1h);

  phase_times()->record_cur_collection_start_sec(start_time_sec);

  // Snapshot concurrent refinement statistics for this (non-full) pause.
  record_concurrent_refinement_data(false /* is_full_collection */);

  _collection_set->reset_bytes_used_before();

  // do that for any other surv rate groups
  _eden_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
}
551
// Called when the initial-mark phase of concurrent marking has completed.
// NOTE(review): the elapsed-time parameter is unused in this body.
void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  // The decision to initiate marking should already have been consumed.
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_initial_mark_gc(false);
}
556
// Note the wall-clock start time of the concurrent mark remark pause; used
// by record_concurrent_mark_remark_end() to compute the pause duration.
void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}
560
561 void G1Policy::record_concurrent_mark_remark_end() {
562 double end_time_sec = os::elapsedTime();
563 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
564 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
565 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
566
684
685 if (collector_state()->in_young_gc_before_mixed()) {
686 assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
687 // This has been the young GC before we start doing mixed GCs. We already
688 // decided to start mixed GCs much earlier, so there is nothing to do except
689 // advancing the state.
690 collector_state()->set_in_young_only_phase(false);
691 collector_state()->set_in_young_gc_before_mixed(false);
692 } else if (!this_pause_was_young_only) {
693 // This is a mixed GC. Here we decide whether to continue doing more
694 // mixed GCs or not.
695 if (!next_gc_should_be_mixed("continue mixed GCs",
696 "do not continue mixed GCs")) {
697 collector_state()->set_in_young_only_phase(true);
698
699 clear_collection_set_candidates();
700 maybe_start_marking();
701 }
702 }
703
704 _eden_surv_rate_group->start_adding_regions();
705
706 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
707 if (update_stats) {
708 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
709 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
710 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
711 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
712 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
713 total_log_buffer_cards;
714
715 if (total_cards_merged > 10) {
716 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
717 average_time_ms(G1GCPhaseTimes::MergeRS) +
718 average_time_ms(G1GCPhaseTimes::MergeHCC) +
719 average_time_ms(G1GCPhaseTimes::MergeLB) +
720 average_time_ms(G1GCPhaseTimes::OptMergeRS);
721 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
722 }
723
724 // Update prediction for card scan
880 // marking, which makes any prediction useless. This increases the accuracy of the
881 // prediction.
882 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
883 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
884 report = true;
885 }
886
887 if (report) {
888 report_ihop_statistics();
889 }
890 }
891
// Emit the IHOP (Initiating Heap Occupancy Percent) control's statistics.
void G1Policy::report_ihop_statistics() {
  _ihop_control->print();
}
895
// Print the per-phase timing breakdown gathered for the last pause.
void G1Policy::print_phases() {
  phase_times()->print();
}
899
900 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
901 size_t rs_length) const {
902 size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
903 return
904 _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
905 _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
906 _analytics->predict_constant_other_time_ms() +
907 predict_survivor_regions_evac_time();
908 }
909
910 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
911 size_t rs_length = _analytics->predict_rs_length();
912 return predict_base_elapsed_time_ms(pending_cards, rs_length);
913 }
914
915 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
916 size_t bytes_to_copy;
917 if (!hr->is_young()) {
918 bytes_to_copy = hr->max_live_bytes();
919 } else {
920 bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));
921 }
922 return bytes_to_copy;
923 }
924
925 double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
926 if (count == 0) {
927 return 0.0;
928 }
929 size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes;
930 if (bytes_to_copy != NULL) {
931 *bytes_to_copy = expected_bytes;
932 }
933 return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->mark_or_rebuild_in_progress());
934 }
935
936 double G1Policy::predict_region_copy_time_ms(HeapRegion* hr) const {
937 size_t const bytes_to_copy = predict_bytes_to_copy(hr);
938 return _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
939 }
940
// Predict the non-copy portion of evacuating `hr`: remembered set card
// merge, card scan, and the per-region "other" bookkeeping time.
double G1Policy::predict_region_non_copy_time_ms(HeapRegion* hr,
                                                 bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  size_t scan_card_num = _analytics->predict_scan_card_num(rs_length, for_young_gc);

  // NOTE(review): the scan card count uses the caller-supplied `for_young_gc`
  // while the merge/scan cost models below use the current collector phase —
  // presumably intentional, but worth confirming against upstream.
  double region_elapsed_time_ms =
    _analytics->predict_card_merge_time_ms(rs_length, collector_state()->in_young_only_phase()) +
    _analytics->predict_card_scan_time_ms(scan_card_num, collector_state()->in_young_only_phase());

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}
959
960 double G1Policy::predict_region_total_time_ms(HeapRegion* hr, bool for_young_gc) const {
961 return predict_region_non_copy_time_ms(hr, for_young_gc) + predict_region_copy_time_ms(hr);
962 }
963
964 bool G1Policy::should_allocate_mutator_region() const {
965 uint young_list_length = _g1h->young_regions_count();
966 uint young_list_target_length = _young_list_target_length;
967 return young_list_length < young_list_target_length;
968 }
969
970 bool G1Policy::can_expand_young_list() const {
971 uint young_list_length = _g1h->young_regions_count();
972 uint young_list_max_length = _young_list_max_length;
973 return young_list_length < young_list_max_length;
974 }
975
// Whether the young list length is sized adaptively (driven by the pause
// time goal) rather than fixed; delegates to the young generation sizer.
bool G1Policy::use_adaptive_young_list_length() const {
  return _young_gen_sizer->use_adaptive_young_list_length();
}
979
980 size_t G1Policy::desired_survivor_size(uint max_regions) const {
981 size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
982 return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
983 }
1291 num_initial_regions, num_optional_regions);
1292 break;
1293 }
1294
1295 // Stop adding regions if the remaining reclaimable space is
1296 // not above G1HeapWastePercent.
1297 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1298 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1299 double threshold = (double) G1HeapWastePercent;
1300 if (reclaimable_percent <= threshold) {
1301 // We've added enough old regions that the amount of uncollected
1302 // reclaimable space is at or below the waste threshold. Stop
1303 // adding old regions to the CSet.
1304 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
1305 "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
1306 byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
1307 reclaimable_percent, G1HeapWastePercent);
1308 break;
1309 }
1310
1311 double predicted_time_ms = predict_region_total_time_ms(hr, false);
1312 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
1313 // Add regions to old set until we reach the minimum amount
1314 if (num_initial_regions < min_old_cset_length) {
1315 predicted_old_time_ms += predicted_time_ms;
1316 num_initial_regions++;
1317 // Record the number of regions added with no time remaining
1318 if (time_remaining_ms == 0.0) {
1319 num_expensive_regions++;
1320 }
1321 } else if (!check_time_remaining) {
1322 // In the non-auto-tuning case, we'll finish adding regions
1323 // to the CSet if we reach the minimum.
1324 log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
1325 break;
1326 } else {
1327 // Keep adding regions to old set until we reach the optional threshold
1328 if (time_remaining_ms > optional_threshold_ms) {
1329 predicted_old_time_ms += predicted_time_ms;
1330 num_initial_regions++;
1331 } else if (time_remaining_ms > 0) {
1351
1352 log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
1353 "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
1354 num_initial_regions, num_optional_regions,
1355 predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
1356 }
1357
1358 void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
1359 uint const max_optional_regions,
1360 double time_remaining_ms,
1361 uint& num_optional_regions) {
1362 assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");
1363
1364 num_optional_regions = 0;
1365 double prediction_ms = 0;
1366 uint candidate_idx = candidates->cur_idx();
1367
1368 HeapRegion* r = candidates->at(candidate_idx);
1369 while (num_optional_regions < max_optional_regions) {
1370 assert(r != NULL, "Region must exist");
1371 prediction_ms += predict_region_total_time_ms(r, false);
1372
1373 if (prediction_ms > time_remaining_ms) {
1374 log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
1375 prediction_ms, r->hrm_index(), time_remaining_ms);
1376 break;
1377 }
1378 // This region will be included in the next optional evacuation.
1379
1380 time_remaining_ms -= prediction_ms;
1381 num_optional_regions++;
1382 r = candidates->at(++candidate_idx);
1383 }
1384
1385 log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
1386 num_optional_regions, max_optional_regions, prediction_ms);
1387 }
1388
1389 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
1390
1391 // Add survivor regions to SurvRateGroup.
|