--- old/src/hotspot/share/gc/g1/g1Analytics.cpp 2020-06-10 10:12:00.892904606 +0200 +++ new/src/hotspot/share/gc/g1/g1Analytics.cpp 2020-06-10 10:12:00.788901039 +0200 @@ -149,18 +149,14 @@ _alloc_rate_ms_seq->add(alloc_rate); } -void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) { - _long_term_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms; - // Filter out nonsensical results due to bad input. +void G1Analytics::compute_pause_time_ratios(double end_time_sec, double pause_time_ms) { + double long_interval_ms = (end_time_sec - oldest_known_gc_end_time_sec()) * 1000.0; + _long_term_pause_time_ratio = _recent_gc_times_ms->sum() / long_interval_ms; _long_term_pause_time_ratio = clamp(_long_term_pause_time_ratio, 0.0, 1.0); - // Compute the ratio of just this last pause time to the entire time range stored - // in the vectors. Comparing this pause to the entire range, rather than only the - // most recent interval, has the effect of smoothing over a possible transient 'burst' - // of more frequent pauses that don't really reflect a change in heap occupancy. - // This reduces the likelihood of a needless heap expansion being triggered. 
- _short_term_pause_time_ratio = - (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; + double short_interval_ms = (end_time_sec - most_recent_gc_end_time_sec()) * 1000.0; + _short_term_pause_time_ratio = pause_time_ms / short_interval_ms; + _short_term_pause_time_ratio = clamp(_short_term_pause_time_ratio, 0.0, 1.0); } void G1Analytics::report_concurrent_refine_rate_ms(double cards_per_ms) { @@ -228,7 +224,11 @@ } double G1Analytics::predict_alloc_rate_ms() const { - return predict_zero_bounded(_alloc_rate_ms_seq); + if (enough_samples_available(_alloc_rate_ms_seq)) { + return predict_zero_bounded(_alloc_rate_ms_seq); + } else { + return 0.0; + } } double G1Analytics::predict_concurrent_refine_rate_ms() const { @@ -311,10 +311,14 @@ return predict_size(_pending_cards_seq); } -double G1Analytics::last_known_gc_end_time_sec() const { +double G1Analytics::oldest_known_gc_end_time_sec() const { return _recent_prev_end_times_for_all_gcs_sec->oldest(); } +double G1Analytics::most_recent_gc_end_time_sec() const { + return _recent_prev_end_times_for_all_gcs_sec->last(); +} + void G1Analytics::update_recent_gc_times(double end_time_sec, double pause_time_ms) { _recent_gc_times_ms->add(pause_time_ms); --- old/src/hotspot/share/gc/g1/g1Analytics.hpp 2020-06-10 10:12:01.328919567 +0200 +++ new/src/hotspot/share/gc/g1/g1Analytics.hpp 2020-06-10 10:12:01.236916410 +0200 @@ -88,6 +88,9 @@ size_t predict_size(TruncatedSeq const* seq) const; double predict_zero_bounded(TruncatedSeq const* seq) const; + double oldest_known_gc_end_time_sec() const; + double most_recent_gc_end_time_sec() const; + public: G1Analytics(const G1Predictions* predictor); @@ -160,9 +163,7 @@ // Add a new GC of the given duration and end time to the record. 
void update_recent_gc_times(double end_time_sec, double elapsed_ms); - void compute_pause_time_ratio(double interval_ms, double pause_time_ms); - - double last_known_gc_end_time_sec() const; + void compute_pause_time_ratios(double end_time_sec, double pause_time_ms); }; #endif // SHARE_GC_G1_G1ANALYTICS_HPP --- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-06-10 10:12:01.760934393 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-06-10 10:12:01.668931236 +0200 @@ -1035,7 +1035,8 @@ assert(num_free_regions() == 0, "we should not have added any free regions"); rebuild_region_sets(false /* free_list_only */); abort_refinement(); - resize_heap_if_necessary(); + + resize_heap_after_full_collection(); // Rebuild the strong code root lists for each region rebuild_strong_code_roots(); @@ -1140,7 +1141,7 @@ clear_all_soft_refs); } -void G1CollectedHeap::resize_heap_if_necessary() { +void G1CollectedHeap::resize_heap_after_full_collection() { assert_at_safepoint_on_vm_thread(); // Capacity, free and used after the GC counted as full regions to @@ -1148,33 +1149,8 @@ const size_t capacity_after_gc = capacity(); const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes(); - // This is enforced in arguments.cpp. - assert(MinHeapFreeRatio <= MaxHeapFreeRatio, - "otherwise the code below doesn't make sense"); - - // We don't have floating point command-line arguments - const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; - const double maximum_used_percentage = 1.0 - minimum_free_percentage; - const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; - const double minimum_used_percentage = 1.0 - maximum_free_percentage; - - // We have to be careful here as these two calculations can overflow - // 32-bit size_t's. 
- double used_after_gc_d = (double) used_after_gc; - double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; - double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; - - // Let's make sure that they are both under the max heap size, which - // by default will make them fit into a size_t. - double desired_capacity_upper_bound = (double) MaxHeapSize; - minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, - desired_capacity_upper_bound); - maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, - desired_capacity_upper_bound); - - // We can now safely turn them into size_t's. - size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; - size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; + size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio); + size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MaxHeapFreeRatio); // This assert only makes sense here, before we adjust them // with respect to the min and max heap size. @@ -1196,7 +1172,7 @@ // Don't expand unless it's significant size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; - log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). " + log_debug(gc, ergo, heap)("Heap resize. Attempt heap expansion (capacity lower than min desired capacity). " "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B " "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)", capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio); @@ -1208,7 +1184,7 @@ // Capacity too large, compute shrinking size size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; - log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). " + log_debug(gc, ergo, heap)("Heap resize. 
Attempt heap shrinking (capacity higher than max desired capacity). " "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B " "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)", capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio); @@ -1322,15 +1298,17 @@ } bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) { + assert(expand_bytes > 0, "must be"); + size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); aligned_expand_bytes = align_up(aligned_expand_bytes, - HeapRegion::GrainBytes); + HeapRegion::GrainBytes); - log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B", + log_debug(gc, ergo, heap)("Heap resize. requested expansion amount: " SIZE_FORMAT "B aligned expansion amount: " SIZE_FORMAT "B", expand_bytes, aligned_expand_bytes); - if (is_maximal_no_gc()) { - log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)"); + if (capacity() == max_capacity()) { + log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap already fully expanded)"); return false; } @@ -1347,8 +1325,11 @@ size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes; assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); policy()->record_new_heap_size(num_regions()); + + log_debug(gc, ergo, heap)("Heap resize. Requested expansion amount: " SIZE_FORMAT "B actual expansion amount: " SIZE_FORMAT "B", + aligned_expand_bytes, actual_expand_bytes); } else { - log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)"); + log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap expansion operation failed)"); // The expansion of the virtual storage space was unsuccessful. // Let's see if it was because we ran out of swap. 
@@ -1366,7 +1347,7 @@ if (expanded_by == 0) { assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available()); - log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)"); + log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap already fully expanded)"); return false; } @@ -1375,27 +1356,47 @@ } void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { - size_t aligned_shrink_bytes = - ReservedSpace::page_align_size_down(shrink_bytes); - aligned_shrink_bytes = align_down(aligned_shrink_bytes, - HeapRegion::GrainBytes); + assert(shrink_bytes > 0, "must be"); + assert(is_aligned(shrink_bytes, HeapRegion::GrainBytes), + "Shrink request for " SIZE_FORMAT "B not aligned to heap region size " SIZE_FORMAT "B", + shrink_bytes, HeapRegion::GrainBytes); + uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove); size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; - log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B", - shrink_bytes, aligned_shrink_bytes, shrunk_bytes); + log_debug(gc, ergo, heap)("Heap resize. Requested shrinking amount: " SIZE_FORMAT "B actual shrinking amount: " SIZE_FORMAT "B", + shrink_bytes, shrunk_bytes); if (num_regions_removed > 0) { policy()->record_new_heap_size(num_regions()); } else { - log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)"); + log_debug(gc, ergo, heap)("Heap resize. 
Did not shrink the heap (heap shrinking operation failed)"); } } void G1CollectedHeap::shrink(size_t shrink_bytes) { - _verifier->verify_region_sets_optional(); + size_t aligned_shrink_bytes = ReservedSpace::page_align_size_down(shrink_bytes); + aligned_shrink_bytes = align_down(aligned_shrink_bytes, + HeapRegion::GrainBytes); + + aligned_shrink_bytes = capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity()); + assert(is_aligned(aligned_shrink_bytes, HeapRegion::GrainBytes), "Bytes to shrink " SIZE_FORMAT "B not aligned", aligned_shrink_bytes); + + log_debug(gc, ergo, heap)("Heap resize. Requested shrink amount: " SIZE_FORMAT "B aligned shrink amount: " SIZE_FORMAT "B", + shrink_bytes, aligned_shrink_bytes); + + if (aligned_shrink_bytes == 0) { + log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (shrink request too small)"); + return; + } + if (capacity() == min_capacity()) { + log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap already at minimum)"); + return; + } + assert(aligned_shrink_bytes > 0, "capacity " SIZE_FORMAT " min_capacity " SIZE_FORMAT, capacity(), min_capacity()); + _verifier->verify_region_sets_optional(); // We should only reach here at the end of a Full GC or during Remark which // means we should not not be holding to any GC alloc regions. The method // could instead use the remove_all_pending() method on free_list to // remove only the ones that we need to remove. 
tear_down_region_sets(true /* free_list_only */); - shrink_helper(shrink_bytes); + shrink_helper(aligned_shrink_bytes); rebuild_region_sets(true /* free_list_only */); _hrm->verify_optional(); @@ -2424,10 +2425,18 @@ return _hrm->max_expandable_length() * HeapRegion::GrainBytes; } +size_t G1CollectedHeap::min_capacity() const { + return MinHeapSize; +} + size_t G1CollectedHeap::max_reserved_capacity() const { return _hrm->max_length() * HeapRegion::GrainBytes; } +size_t G1CollectedHeap::soft_max_capacity() const { + return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity()); +} + jlong G1CollectedHeap::millis_since_last_gc() { // See the notes in GenCollectedHeap::millis_since_last_gc() // for more information about the implementation. @@ -2942,17 +2951,17 @@ verify_numa_regions("GC End"); } -void G1CollectedHeap::expand_heap_after_young_collection(){ - size_t expand_bytes = _heap_sizing_policy->expansion_amount(); - if (expand_bytes > 0) { - // No need for an ergo logging here, - // expansion_amount() does this when it returns a value > 0. - double expand_ms; - if (!expand(expand_bytes, _workers, &expand_ms)) { - // We failed to expand the heap. Cannot do anything about it. 
- } - phase_times()->record_expand_heap_time(expand_ms); +void G1CollectedHeap::resize_heap_after_young_gc() { + Ticks start = Ticks::now(); + + ssize_t resize_bytes = _heap_sizing_policy->resize_amount_after_young_gc(); + if (resize_bytes > 0) { + expand(resize_bytes, _workers, NULL); + } else if (resize_bytes < 0) { + shrink(-resize_bytes); } + + phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0); } const char* G1CollectedHeap::young_gc_name() const { @@ -3120,7 +3129,7 @@ _allocator->init_mutator_alloc_regions(); - expand_heap_after_young_collection(); + resize_heap_after_young_gc(); double sample_end_time_sec = os::elapsedTime(); double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; --- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-06-10 10:12:02.304953062 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-06-10 10:12:02.216950040 +0200 @@ -566,7 +566,7 @@ return _g1mm; } - void resize_heap_if_necessary(); + void resize_heap_after_full_collection(); G1NUMA* numa() const { return _numa; } @@ -789,7 +789,8 @@ G1RedirtyCardsQueueSet* rdcqs, G1ParScanThreadStateSet* pss); - void expand_heap_after_young_collection(); + void resize_heap_after_young_gc(); + // Update object copying statistics. void record_obj_copy_mem_stats(); @@ -1086,7 +1087,7 @@ inline void archive_set_add(HeapRegion* hr); - size_t non_young_capacity_bytes() { + size_t non_young_capacity_bytes() const { return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes; } @@ -1284,10 +1285,14 @@ // Print the maximum heap capacity. virtual size_t max_capacity() const; + virtual size_t min_capacity() const; // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used. virtual size_t max_reserved_capacity() const; + // Print the soft maximum heap capacity. 
+ size_t soft_max_capacity() const; + virtual jlong millis_since_last_gc(); --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-06-10 10:12:02.776969261 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-06-10 10:12:02.684966104 +0200 @@ -1146,8 +1146,6 @@ ClassLoaderDataGraph::purge(); } - _g1h->resize_heap_if_necessary(); - compute_new_sizes(); verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after"); --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2020-06-10 10:12:03.256985736 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2020-06-10 10:12:03.164982578 +0200 @@ -36,6 +36,7 @@ #include "gc/shared/workgroup.hpp" #include "memory/allocation.hpp" #include "utilities/compilerWarnings.hpp" +#include "utilities/numberSeq.hpp" class ConcurrentGCTimer; class G1ConcurrentMarkThread; --- old/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp 2020-06-10 10:12:03.713001390 +0200 +++ new/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp 2020-06-10 10:12:03.616998093 +0200 @@ -161,7 +161,7 @@ _cur_concatenate_dirty_card_logs_time_ms = 0.0; _cur_derived_pointer_table_update_time_ms = 0.0; _cur_clear_ct_time_ms = 0.0; - _cur_expand_heap_time_ms = 0.0; + _cur_resize_heap_time_ms = 0.0; _cur_ref_proc_time_ms = 0.0; _cur_collection_start_sec = 0.0; _root_region_scan_wait_time_ms = 0.0; @@ -471,7 +471,7 @@ _recorded_total_free_cset_time_ms + _recorded_total_rebuild_freelist_time_ms + _cur_fast_reclaim_humongous_time_ms + - _cur_expand_heap_time_ms + + _cur_resize_heap_time_ms + _cur_string_deduplication_time_ms; info_time("Post Evacuate Collection Set", sum_ms); @@ -523,8 +523,7 @@ if (UseTLAB && ResizeTLAB) { debug_time("Resize TLABs", _cur_resize_tlab_time_ms); } - debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms); - + debug_time("Resize Heap After Collection", _cur_resize_heap_time_ms); return sum_ms; } --- old/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp 2020-06-10 10:12:04.177017318 +0200 +++ 
new/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp 2020-06-10 10:12:04.093014433 +0200 @@ -147,7 +147,7 @@ double _cur_derived_pointer_table_update_time_ms; double _cur_clear_ct_time_ms; - double _cur_expand_heap_time_ms; + double _cur_resize_heap_time_ms; double _cur_ref_proc_time_ms; double _cur_collection_start_sec; @@ -265,8 +265,8 @@ _cur_clear_ct_time_ms = ms; } - void record_expand_heap_time(double ms) { - _cur_expand_heap_time_ms = ms; + void record_resize_heap_time(double ms) { + _cur_resize_heap_time_ms = ms; } void record_initial_evac_time(double ms) { @@ -404,8 +404,8 @@ return _cur_clear_ct_time_ms; } - double cur_expand_heap_time_ms() { - return _cur_expand_heap_time_ms; + double cur_resize_heap_time_ms() { + return _cur_resize_heap_time_ms; } double root_region_scan_wait_time_ms() { --- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-06-10 10:12:04.633032974 +0200 +++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-06-10 10:12:04.541029816 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc/g1/g1_globals.hpp" #include "gc/g1/g1CollectedHeap.hpp" #include "gc/g1/g1HeapSizingPolicy.hpp" #include "gc/g1/g1Analytics.hpp" @@ -38,16 +39,33 @@ G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) : _g1h(g1h), _analytics(analytics), - _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) { + _long_term_interval(analytics->number_of_recorded_pause_times()), + // Bias for expansion at startup; the +1 is to counter the first sample always + // being 0.0, i.e. lower than any threshold. 
+ _ratio_exceeds_threshold(MinOverThresholdForExpansion / 2 + 1), + _recent_pause_ratios(analytics->number_of_recorded_pause_times()), + _long_term_count(0) { + + assert(_ratio_exceeds_threshold < MinOverThresholdForExpansion, + "Initial ratio counter value too high."); + assert(_ratio_exceeds_threshold > -MinOverThresholdForExpansion, + "Initial ratio counter value too low."); + assert(MinOverThresholdForExpansion < _long_term_interval, + "Expansion threshold count must be less than %u", _long_term_interval); + assert(MinOverThresholdForShrink < _long_term_interval, + "Shrink threshold count must be less than %u", _long_term_interval); +} - assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics); - clear_ratio_check_data(); +void G1HeapSizingPolicy::reset_ratio_tracking_data() { + _long_term_count = 0; + _ratio_exceeds_threshold = 0; + // Keep the recent gc time ratio data. } -void G1HeapSizingPolicy::clear_ratio_check_data() { - _ratio_over_threshold_count = 0; - _ratio_over_threshold_sum = 0.0; - _pauses_since_start = 0; +void G1HeapSizingPolicy::decay_ratio_tracking_data() { + _long_term_count = 0; + _ratio_exceeds_threshold /= 2; + // Keep the recent gc time ratio data. } double G1HeapSizingPolicy::scale_with_heap(double pause_time_threshold) { @@ -63,71 +81,134 @@ return threshold; } -static void log_expansion(double short_term_pause_time_ratio, - double long_term_pause_time_ratio, - double threshold, - double pause_time_ratio, - bool fully_expanded, - size_t resize_bytes) { +double G1HeapSizingPolicy::scale_resize_ratio_delta(double ratio_delta) { + // If the delta is small (less than the StartScaleDownAt value), scale the size + // down linearly, but not by less than MinScaleDownFactor. If the delta is large + // (greater than the StartScaleUpAt value), scale up, but adding no more than + // MaxScaleUpFactor times the base size. 
The scaling will be linear in the range + from StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, + // ScaleUpRange sets the rate of scaling up. + double const MinScaleDownFactor = 0.2; + double const MaxScaleUpFactor = 2.0; + + double const StartScaleDownAt = 1.0; + double const StartScaleUpAt = 1.5; + double const ScaleUpRange = 4.0; + + double scale_factor = 1.0; + if (ratio_delta < StartScaleDownAt) { + scale_factor = ratio_delta / StartScaleDownAt; + scale_factor = MAX2(scale_factor, MinScaleDownFactor); + } else if (ratio_delta > StartScaleUpAt) { + scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); + scale_factor = MIN2(scale_factor, MaxScaleUpFactor); + } + log_trace(gc, ergo, heap)("scaling ratio %1.2f scale %1.2f", ratio_delta, scale_factor); + return scale_factor; +} + +static void log_resize(double short_term_pause_time_ratio, + double long_term_pause_time_ratio, + double lower_threshold, + double upper_threshold, + double pause_time_ratio, + bool at_limit, + ssize_t resize_bytes) { - log_debug(gc, ergo, heap)("Heap expansion: " + log_debug(gc, ergo, heap)("Heap resize: " "short term pause time ratio %1.2f%% long term pause time ratio %1.2f%% " - "threshold %1.2f%% pause time ratio %1.2f%% fully expanded %s " - "resize by " SIZE_FORMAT "B", + "lower threshold %1.2f%% upper threshold %1.2f%% pause time ratio %1.2f%% " + "at limit %s resize by " SSIZE_FORMAT "B", short_term_pause_time_ratio * 100.0, long_term_pause_time_ratio * 100.0, - threshold * 100.0, + lower_threshold * 100.0, + upper_threshold * 100.0, pause_time_ratio * 100.0, - BOOL_TO_STR(fully_expanded), + BOOL_TO_STR(at_limit), resize_bytes); } -size_t G1HeapSizingPolicy::expansion_amount() { +ssize_t G1HeapSizingPolicy::resize_amount_after_young_gc() { assert(GCTimeRatio > 0, "must be"); double long_term_pause_time_ratio = _analytics->long_term_pause_time_ratio(); double short_term_pause_time_ratio = _analytics->short_term_pause_time_ratio(); - const double 
pause_time_threshold = 1.0 / (1.0 + GCTimeRatio); - double threshold = scale_with_heap(pause_time_threshold); - - size_t expand_bytes = 0; - if (_g1h->capacity() == _g1h->max_capacity()) { - log_expansion(short_term_pause_time_ratio, long_term_pause_time_ratio, - threshold, pause_time_threshold, true, 0); - clear_ratio_check_data(); - return expand_bytes; + // Calculate gc time ratio thresholds: + // - upper threshold, directly based on GCTimeRatio. We do not want to exceed + // this. + // - lower threshold, we do not want to go under. + // - mid threshold, halfway between upper and lower threshold, represents the + // actual target when resizing the heap. + const double pause_time_threshold = 1.0 / (1.0 + GCTimeRatio); + const double min_gc_time_ratio_ratio = G1MinimumPercentOfGCTimeRatio / 100.0; + double upper_threshold = scale_with_heap(pause_time_threshold); + double lower_threshold = upper_threshold * min_gc_time_ratio_ratio; + + // Explicitly use GCTimeRatio based threshold to more quickly expand and shrink + // at smaller heap sizes. + double mid_threshold = (upper_threshold + lower_threshold) / 2; + + // If the short term GC time ratio exceeds a threshold, increment the occurrence + // counter. + if (short_term_pause_time_ratio > upper_threshold) { + _ratio_exceeds_threshold++; + } else if (short_term_pause_time_ratio < lower_threshold) { + _ratio_exceeds_threshold--; } - - // If the last GC time ratio is over the threshold, increment the count of - // times it has been exceeded, and add this ratio to the sum of exceeded - // ratios. - if (short_term_pause_time_ratio > threshold) { - _ratio_over_threshold_count++; - _ratio_over_threshold_sum += short_term_pause_time_ratio; + double ratio_delta = (short_term_pause_time_ratio - mid_threshold) / mid_threshold; + // Ignore very first sample as it is garbage. 
+ if (_long_term_count != 0 || _recent_pause_ratios.num() != 0) { + _recent_pause_ratios.add(ratio_delta); } + _long_term_count++; + + log_trace(gc, ergo, heap)("Heap resize triggers: long term count: %u " + "long term interval: %u " + "delta: %1.2f " + "ratio exceeds threshold count: %d", + _long_term_count, + _long_term_interval, + ratio_delta, + _ratio_exceeds_threshold); + + log_debug(gc, ergo, heap)("Heap triggers: pauses-since-start: %u num-prev-pauses-for-heuristics: %u ratio-exceeds-threshold-count: %d", + _recent_pause_ratios.num(), _long_term_interval, _ratio_exceeds_threshold); + + // Check if there is a short- or long-term need for resizing, expansion first. + // + // Short-term resizing need is detected by exceeding the upper or lower thresholds + // multiple times, tracked in _ratio_exceeds_threshold. If it contains a large + // positive or negative (larger than the respective thresholds), we trigger + // resizing calculation. + // + // Slowly occurring long-term changes to the actual gc time ratios are checked + // only every once a while. + // + // The _ratio_exceeds_threshold value is reset after each resize, or slowly + // decayed if nothing happens. + + ssize_t resize_bytes = 0; + + bool check_long_term_resize = _long_term_count == _long_term_interval; + + if ((_ratio_exceeds_threshold == MinOverThresholdForExpansion) || + (check_long_term_resize && (long_term_pause_time_ratio > upper_threshold))) { + + // Short-cut the case when we are already fully expanded. 
+ if (_g1h->capacity() == _g1h->max_capacity()) { + log_resize(short_term_pause_time_ratio, long_term_pause_time_ratio, + lower_threshold, upper_threshold, pause_time_threshold, true, 0); + reset_ratio_tracking_data(); + return resize_bytes; + } - log_trace(gc, ergo, heap)("Heap expansion triggers: pauses since start: %u " - "num prev pauses for heuristics: %u " - "ratio over threshold count: %u", - _pauses_since_start, - _num_prev_pauses_for_heuristics, - _ratio_over_threshold_count); - - // Check if we've had enough GC time ratio checks that were over the - // threshold to trigger an expansion. We'll also expand if we've - // reached the end of the history buffer and the average of all entries - // is still over the threshold. This indicates a smaller number of GCs were - // long enough to make the average exceed the threshold. - bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics; - if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || - (filled_history_buffer && (long_term_pause_time_ratio > threshold))) { - size_t min_expand_bytes = HeapRegion::GrainBytes; size_t reserved_bytes = _g1h->max_capacity(); size_t committed_bytes = _g1h->capacity(); size_t uncommitted_bytes = reserved_bytes - committed_bytes; size_t expand_bytes_via_pct = uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; + size_t min_expand_bytes = MIN2(HeapRegion::GrainBytes, uncommitted_bytes); double scale_factor = 1.0; // If the current size is less than 1/4 of the Initial heap size, expand @@ -137,61 +218,81 @@ // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of // the available expansion space, whichever is smaller, as the base // expansion size. Then possibly scale this size according to how much the - // threshold has (on average) been exceeded by. If the delta is small - // (less than the StartScaleDownAt value), scale the size down linearly, but - // not by less than MinScaleDownFactor. 
If the delta is large (greater than - the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor - times the base size. The scaling will be linear in the range from - StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, - ScaleUpRange sets the rate of scaling up. + // threshold has (on average) been exceeded by. if (committed_bytes < InitialHeapSize / 4) { - expand_bytes = (InitialHeapSize - committed_bytes) / 2; + resize_bytes = (InitialHeapSize - committed_bytes) / 2; } else { - double const MinScaleDownFactor = 0.2; - double const MaxScaleUpFactor = 2; - double const StartScaleDownAt = pause_time_threshold; - double const StartScaleUpAt = pause_time_threshold * 1.5; - double const ScaleUpRange = pause_time_threshold * 2.0; - - double ratio_delta; - if (filled_history_buffer) { - ratio_delta = long_term_pause_time_ratio - threshold; - } else { - ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold; + double ratio_delta = _recent_pause_ratios.avg(); + if (check_long_term_resize) { + ratio_delta = MAX2(ratio_delta, (long_term_pause_time_ratio - mid_threshold) / mid_threshold); } + log_trace(gc, ergo, heap)("expand deltas long %1.2f short %1.2f check long term %u", (long_term_pause_time_ratio - mid_threshold) / mid_threshold, _recent_pause_ratios.avg(), check_long_term_resize); + scale_factor = scale_resize_ratio_delta(fabsd(ratio_delta)); - expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); - if (ratio_delta < StartScaleDownAt) { - scale_factor = ratio_delta / StartScaleDownAt; - scale_factor = MAX2(scale_factor, MinScaleDownFactor); - } else if (ratio_delta > StartScaleUpAt) { - scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); - scale_factor = MIN2(scale_factor, MaxScaleUpFactor); - } + resize_bytes = MIN2(expand_bytes_via_pct, committed_bytes); } - expand_bytes = static_cast(expand_bytes * scale_factor); - + resize_bytes = static_cast(resize_bytes * scale_factor); + // 
Ensure the expansion size is at least the minimum growth amount // and at most the remaining uncommitted byte size. - expand_bytes = clamp(expand_bytes, min_expand_bytes, uncommitted_bytes); + resize_bytes = clamp((size_t)resize_bytes, min_expand_bytes, uncommitted_bytes); - clear_ratio_check_data(); - } else { - // An expansion was not triggered. If we've started counting, increment - // the number of checks we've made in the current window. If we've - // reached the end of the window without resizing, clear the counters to - // start again the next time we see a ratio above the threshold. - if (_ratio_over_threshold_count > 0) { - _pauses_since_start++; - if (_pauses_since_start > _num_prev_pauses_for_heuristics) { - clear_ratio_check_data(); - } + reset_ratio_tracking_data(); + } else if ((_ratio_exceeds_threshold == -MinOverThresholdForShrink) || + (check_long_term_resize && (long_term_pause_time_ratio < lower_threshold))) { + + if (_g1h->capacity() == _g1h->min_capacity()) { + log_resize(short_term_pause_time_ratio, long_term_pause_time_ratio, + lower_threshold, upper_threshold, pause_time_threshold, true, 0); + reset_ratio_tracking_data(); + return resize_bytes; + } + + // Shrink. + double ratio_delta = _recent_pause_ratios.avg(); + if (check_long_term_resize) { + // Intentionally use the max to limit the shrinking a bit. + ratio_delta = MAX2(ratio_delta, (long_term_pause_time_ratio - mid_threshold) / mid_threshold); + } + log_trace(gc, ergo, heap)("shrink deltas long %1.2f short %1.2f long term %u", (long_term_pause_time_ratio - mid_threshold) / mid_threshold, _recent_pause_ratios.avg(), check_long_term_resize); + + double scale_factor = scale_resize_ratio_delta(fabsd(ratio_delta)); + scale_factor = clamp(scale_factor, 0.0, G1ShrinkByPercentOfAvailable / 100.0); + + // We are at the end of GC, so free regions are at maximum. 
+ size_t free_regions = _g1h->num_free_regions() * (1 - G1ReservePercent / 100.0); + + resize_bytes = -((double)HeapRegion::GrainBytes * scale_factor * free_regions); + + log_debug(gc)("shrink log: filled_hist %d target ratio: %1.2f%% ratio delta: %1.2f%% scale factor %1.2f%% free_regions " SIZE_FORMAT " resize_bytes " SSIZE_FORMAT, + check_long_term_resize, mid_threshold * 100.0, _recent_pause_ratios.avg() * 100.0, scale_factor * 100.0, free_regions, resize_bytes); + + reset_ratio_tracking_data(); + } else if (check_long_term_resize) { + // A resize has not been triggered, but the long term counter overflowed. + decay_ratio_tracking_data(); } - log_expansion(short_term_pause_time_ratio, long_term_pause_time_ratio, - threshold, pause_time_threshold, false, expand_bytes); + log_resize(short_term_pause_time_ratio, long_term_pause_time_ratio, + lower_threshold, upper_threshold, pause_time_threshold, + false, resize_bytes); + + return resize_bytes; +} - return expand_bytes; +size_t G1HeapSizingPolicy::target_heap_capacity(size_t used_bytes, uintx free_ratio) const { + const double free_percentage = (double) free_ratio / 100.0; + const double used_percentage = 1.0 - free_percentage; + + // We have to be careful here as these two calculations can overflow + // 32-bit size_t's. + double used_bytes_d = (double) used_bytes; + double desired_capacity_d = used_bytes_d / used_percentage; + // Let's make sure that they are both under the max heap size, which + // by default will make it fit into a size_t. + double desired_capacity_upper_bound = (double) MaxHeapSize; + desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound); + // We can now safely turn it into size_t's. 
+ return (size_t) desired_capacity_d; } --- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp 2020-06-10 10:12:05.097048906 +0200 +++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp 2020-06-10 10:12:05.001045608 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,38 +26,82 @@ #define SHARE_GC_G1_G1HEAPSIZINGPOLICY_HPP #include "memory/allocation.hpp" +#include "utilities/numberSeq.hpp" class G1Analytics; class G1CollectedHeap; +// +// Contains heuristics for heap sizing, i.e. expansion or shrinking, during operation +// based on gc time ratio. +// +// The heuristics tracks both short term and long term behavior to effect heap +// size change. +// +// Short term tracking is based on short-term gc time ratio behavior: for this we +// record events for when actual gc time ratio is outside the range of +// [GCTimeRatio * G1MinimumPercentOfGCTimeRatio, GCTimeRatio] or not in a counter. +// If below that range, we decrement that counter, if above, we increment it. +// +// The intent of this mechanism is to filter short term events as heap sizing has +// some overhead. +// +// If that counter reaches the MinOverThresholdForExpansion we consider expansion, +// if that counter reaches -MinOverThresholdForShrink we consider heap shrinking. +// +// While doing so, we accumulate the difference to the midpoint of this range to +// guide the expansion/shrinking amount. +// +// Further, if there is no short-term based resizing event for a "long" time, we +// decay that counter, i.e. drop it towards zero again to avoid that previous +// intermediate length short term behavior followed by a quiet time and a single +// short term event causes unnecessary resizes. 
+// +// Long term behavior is solely managed by regularly comparing actual long term +// gc time ratio with the boundaries of above range in regular long term +// intervals. If current long term gc time ratio is outside, expand or shrink +// respectively. +// class G1HeapSizingPolicy: public CHeapObj<mtGC> { - // MinOverThresholdForGrowth must be less than the number of recorded - // pause times in G1Analytics, representing the minimum number of pause - // time ratios that exceed GCTimeRatio before a heap expansion will be triggered. - const static uint MinOverThresholdForGrowth = 4; + // MinOverThresholdForExpansion/Shrink define the number of actual gc time + // ratios over the upper and lower thresholds respectively. + const static int MinOverThresholdForExpansion = 4; + const static int MinOverThresholdForShrink = 4; const G1CollectedHeap* _g1h; const G1Analytics* _analytics; - const uint _num_prev_pauses_for_heuristics; - // Ratio check data for determining if heap growth is necessary. - uint _ratio_over_threshold_count; - double _ratio_over_threshold_sum; - uint _pauses_since_start; + const uint _long_term_interval; + // Number of times actual gc time ratio crossed lower and upper threshold + // recently; every time the upper threshold is exceeded, it is incremented, + // and decremented if the lower threshold is undercut. + int _ratio_exceeds_threshold; + // Recent actual gc time ratios relative to the middle of lower and upper threshold. + TruncatedSeq _recent_pause_ratios; + uint _long_term_count; + + // Clear ratio tracking data used by resize_amount(). + void reset_ratio_tracking_data(); + void decay_ratio_tracking_data(); - // Scale "full" gc pause time threshold with heap size as we want to resize more + // Scale "full" gc time ratio threshold with heap size as we want to resize more // eagerly at small heap sizes. 
double scale_with_heap(double pause_time_threshold); + // Scale the ratio delta depending on how far it exceeds the actual target gc time + // ratio. + double scale_resize_ratio_delta(double ratio_delta); + G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics); public: - // If an expansion would be appropriate, because recent GC overhead had - // exceeded the desired limit, return an amount to expand by. - size_t expansion_amount(); + // Return by how many bytes the heap should be changed based on recent gc time + // ratio after young collection. Positive values mean expansion is desired, and + // negative values mean desired shrinking. + ssize_t resize_amount_after_young_gc(); - // Clear ratio tracking data used by expansion_amount(). - void clear_ratio_check_data(); + // Calculate the target capacity based on used bytes and free ratio. + size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) const; static G1HeapSizingPolicy* create(const G1CollectedHeap* g1h, const G1Analytics* analytics); }; --- old/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-06-10 10:12:05.537064015 +0200 +++ new/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-06-10 10:12:05.445060856 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,7 @@ _target_occupancy = new_target_occupancy; } -void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) { +void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes) { assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s); _last_allocation_time_s = allocation_time_s; @@ -89,12 +89,23 @@ _heap_reserve_percent(heap_reserve_percent), _heap_waste_percent(heap_waste_percent), _predictor(predictor), - _marking_times_s(10, 0.95), - _allocation_rate_s(10, 0.95), + _marking_times_s(10), + _allocation_rate_s(10), _last_unrestrained_young_size(0) { } +void G1AdaptiveIHOPControl::update_target_occupancy(size_t new_target_occupancy) { + size_t old_target_occupancy = _target_occupancy; + G1IHOPControl::update_target_occupancy(new_target_occupancy); + + // When resizing the heap, as an estimate of the new value, scale the + // buffer needed linearly. 
+ if (old_target_occupancy != 0) { + _last_unrestrained_young_size = _last_unrestrained_young_size * (new_target_occupancy / (double)old_target_occupancy); + } +} + size_t G1AdaptiveIHOPControl::actual_target_threshold() const { guarantee(_target_occupancy > 0, "Target occupancy still not updated yet."); // The actual target threshold takes the heap reserve and the expected waste in @@ -108,7 +119,7 @@ double safe_total_heap_percentage = MIN2((double)(_heap_reserve_percent + _heap_waste_percent), 100.0); return (size_t)MIN2( - G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0, + G1CollectedHeap::heap()->soft_max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0, _target_occupancy * (100.0 - _heap_waste_percent) / 100.0 ); } @@ -146,14 +157,15 @@ } void G1AdaptiveIHOPControl::update_allocation_info(double allocation_time_s, - size_t allocated_bytes, - size_t additional_buffer_size) { - G1IHOPControl::update_allocation_info(allocation_time_s, allocated_bytes, additional_buffer_size); + size_t allocated_bytes) { + G1IHOPControl::update_allocation_info(allocation_time_s, allocated_bytes); double allocation_rate = (double) allocated_bytes / allocation_time_s; _allocation_rate_s.add(allocation_rate); +} - _last_unrestrained_young_size = additional_buffer_size; +void G1AdaptiveIHOPControl::update_additional_buffer(size_t additional_buffer_bytes) { + _last_unrestrained_young_size = additional_buffer_bytes; } void G1AdaptiveIHOPControl::update_marking_length(double marking_length_s) { --- old/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-06-10 10:12:05.985079397 +0200 +++ new/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-06-10 10:12:05.897076376 +0200 @@ -63,14 +63,15 @@ // Adjust target occupancy. virtual void update_target_occupancy(size_t new_target_occupancy); // Update information about time during which allocations in the Java heap occurred, - // how large these allocations were in bytes, and an additional buffer. 
+ // and how large these allocations were in bytes. // The allocations should contain any amount of space made unusable for further // allocation, e.g. any waste caused by TLAB allocation, space at the end of // humongous objects that can not be used for allocation, etc. // Together with the target occupancy, this additional buffer should contain the // difference between old gen size and total heap size at the start of reclamation, // and space required for that reclamation. - virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size); + virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes); + virtual void update_additional_buffer(size_t additional_buffer_bytes) { } // Update the time spent in the mutator beginning from the end of initial mark to // the first mixed gc. virtual void update_marking_length(double marking_length_s) = 0; @@ -140,9 +141,11 @@ size_t heap_reserve_percent, // The percentage of total heap capacity that should not be tapped into. size_t heap_waste_percent); // The percentage of the free space in the heap that we think is not usable for allocation. 
+ virtual void update_target_occupancy(size_t new_target_occupancy); virtual size_t get_conc_mark_start_threshold(); - virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size); + virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes); + virtual void update_additional_buffer(size_t additional_buffer_bytes); virtual void update_marking_length(double marking_length_s); virtual void print(); --- old/src/hotspot/share/gc/g1/g1Policy.cpp 2020-06-10 10:12:06.421094371 +0200 +++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2020-06-10 10:12:06.329091212 +0200 @@ -46,6 +46,7 @@ #include "gc/shared/gcPolicyCounters.hpp" #include "logging/log.hpp" #include "runtime/arguments.hpp" +#include "runtime/globals.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/debug.hpp" @@ -62,7 +63,6 @@ _full_collection_start_sec(0.0), _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC), _young_list_target_length(0), - _young_list_fixed_length(0), _young_list_max_length(0), _eden_surv_rate_group(new G1SurvRateGroup()), _survivor_surv_rate_group(new G1SurvRateGroup()), @@ -107,14 +107,11 @@ assert(Heap_lock->owned_by_self(), "Locking discipline."); - if (!use_adaptive_young_list_length()) { - _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); - } _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions()); _free_regions_at_end_of_collection = _g1h->num_free_regions(); - update_young_list_max_and_target_length(); + update_young_max_and_target_length(); // We may immediately start allocating regions and placing them on the // collection set list. 
Initialize the per-collection set info _collection_set->start_incremental_building(); @@ -189,158 +186,252 @@ _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes); } -uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const { +uint G1Policy::calculate_desired_eden_length_by_mmu() const { + // One could argue that any useful eden length to keep any MMU would be 1, but + // in theory this is possible. Other constraints enforce a minimum eden of 1 + // anyway. uint desired_min_length = 0; if (use_adaptive_young_list_length()) { - if (_analytics->num_alloc_rate_ms() > 3) { - double now_sec = os::elapsedTime(); - double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; - double alloc_rate_ms = _analytics->predict_alloc_rate_ms(); - desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); - } else { - // otherwise we don't have enough info to make the prediction - } + double now_sec = os::elapsedTime(); + double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; + double alloc_rate_ms = _analytics->predict_alloc_rate_ms(); + desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); } - desired_min_length += base_min_length; - // make sure we don't go below any user-defined minimum bound - return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); -} - -uint G1Policy::calculate_young_list_desired_max_length() const { - // Here, we might want to also take into account any additional - // constraints (i.e., user-defined minimum bound). Currently, we - // effectively don't set this bound. 
- return _young_gen_sizer->max_desired_young_length(); + return desired_min_length; } -uint G1Policy::update_young_list_max_and_target_length() { - return update_young_list_max_and_target_length(_analytics->predict_rs_length()); +uint G1Policy::update_young_max_and_target_length() { + return update_young_max_and_target_length(_analytics->predict_rs_length()); } -uint G1Policy::update_young_list_max_and_target_length(size_t rs_length) { - uint unbounded_target_length = update_young_list_target_length(rs_length); +uint G1Policy::update_young_max_and_target_length(size_t rs_length) { + uint unbounded_target_length = update_young_target_length(rs_length); update_max_gc_locker_expansion(); return unbounded_target_length; } -uint G1Policy::update_young_list_target_length(size_t rs_length) { - YoungTargetLengths young_lengths = young_list_target_lengths(rs_length); - _young_list_target_length = young_lengths.first; - - return young_lengths.second; -} - -G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_length) const { - YoungTargetLengths result; +uint G1Policy::update_young_target_length(size_t rs_length) { + uint desired_length = calculate_young_desired_length(rs_length); + _young_list_target_length = calculate_young_target_length(desired_length); + + log_debug(gc,ergo,heap)("Young target lengths: desired: %u target: %u", + desired_length, _young_list_target_length); + return desired_length; +} + +// Calculates desired young gen length. It is calculated from: +// +// - sizer min/max bounds on young gen +// - pause time goal for whole young gen evacuation +// - MMU goal influencing eden to make GCs spaced apart. +// - a minimum one eden region length. 
+// +uint G1Policy::calculate_young_desired_length(size_t rs_length) const { + uint min_young_length_by_sizer = _young_gen_sizer->min_desired_young_length(); + uint max_young_length_by_sizer = _young_gen_sizer->max_desired_young_length(); + + assert(min_young_length_by_sizer >= 1, "invariant"); + assert(max_young_length_by_sizer >= min_young_length_by_sizer, "invariant"); + + // Absolute minimum eden length. See above why. + // Enforcing a minimum eden length helps at startup when the predictors are not + // yet trained on the application to avoid unnecessary (but very short) full gcs + // on very small (initial) heaps. + uint const MinDesiredEdenLength = 1; // Calculate the absolute and desired min bounds first. - // This is how many young regions we already have (currently: the survivors). - const uint base_min_length = _g1h->survivor_regions_count(); - uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); - // This is the absolute minimum young length. Ensure that we - // will at least have one eden region available for allocation. - uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1); - // If we shrank the young list target it should not shrink below the current size. - desired_min_length = MAX2(desired_min_length, absolute_min_length); - // Calculate the absolute and desired max bounds. + // This is how many survivor regions we already have. + const uint survivor_length = _g1h->survivor_regions_count(); + // Size of the already allocated young gen. + const uint allocated_young_length = _g1h->young_regions_count(); + // This is the absolute minimum young length that we can return. Ensure that we + // don't go below any user-defined minimum bound; but we might have already + // allocated more than that for reasons. In this case, use that. + uint absolute_min_young_length = MAX2(allocated_young_length, min_young_length_by_sizer); + // Calculate the absolute max bounds. 
After evac failure or when revising the + // young length we might have exceeded absolute min length or absolute_max_length, + // so adjust the result accordingly. + uint absolute_max_young_length = MAX2(max_young_length_by_sizer, absolute_min_young_length); + + uint desired_eden_length_by_mmu = 0; + uint desired_eden_length_by_pause = 0; + uint desired_eden_length_before_mixed = 0; - uint desired_max_length = calculate_young_list_desired_max_length(); - - uint young_list_target_length = 0; + uint desired_young_length = 0; if (use_adaptive_young_list_length()) { - if (collector_state()->in_young_only_phase()) { - young_list_target_length = - calculate_young_list_target_length(rs_length, - base_min_length, - desired_min_length, - desired_max_length); + desired_eden_length_by_mmu = calculate_desired_eden_length_by_mmu(); + + const size_t pending_cards = _analytics->predict_pending_cards(); + double survivor_base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length); + + if (!next_gc_should_be_mixed(NULL, NULL)) { + desired_eden_length_by_pause = + calculate_desired_eden_length_by_pause(survivor_base_time_ms, + absolute_min_young_length - survivor_length, + absolute_max_young_length - survivor_length); } else { - // Don't calculate anything and let the code below bound it to - // the desired_min_length, i.e., do the next GC as soon as - // possible to maximize how many old regions we can add to it. + desired_eden_length_before_mixed = + calculate_desired_eden_length_before_mixed(survivor_base_time_ms, + absolute_min_young_length - survivor_length, + absolute_max_young_length - survivor_length); } + // Above either sets desired_eden_length_by_pause or desired_eden_length_before_mixed, + // the other is zero. Use the one that has been set below. 
+ uint desired_eden_length = MAX2(desired_eden_length_by_pause, + desired_eden_length_before_mixed); + + // Finally incorporate MMU concerns; assume that it overrides the pause time + // goal, as the default value has been chosen to effectively disable it. + // Also request at least one eden region, see above for reasons. + desired_eden_length = MAX3(desired_eden_length, + desired_eden_length_by_mmu, + MinDesiredEdenLength); + + desired_young_length = desired_eden_length + survivor_length; } else { // The user asked for a fixed young gen so we'll fix the young gen // whether the next GC is young or mixed. - young_list_target_length = _young_list_fixed_length; - } - - result.second = young_list_target_length; - - // We will try our best not to "eat" into the reserve. - uint absolute_max_length = 0; - if (_free_regions_at_end_of_collection > _reserve_regions) { - absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; - } - if (desired_max_length > absolute_max_length) { - desired_max_length = absolute_max_length; + desired_young_length = min_young_length_by_sizer; } + // Clamp to absolute min/max after we determined desired lengths. + desired_young_length = clamp(desired_young_length, absolute_min_young_length, absolute_max_young_length); - // Make sure we don't go over the desired max length, nor under the - // desired min length. In case they clash, desired_min_length wins - // which is why that test is second. 
- if (young_list_target_length > desired_max_length) { - young_list_target_length = desired_max_length; - } - if (young_list_target_length < desired_min_length) { - young_list_target_length = desired_min_length; + log_trace(gc, ergo, heap)("Young desired length %u " + "survivor length %u " + "allocated young length %u " + "absolute min young length %u " + "absolute max young length %u " + "desired eden length by mmu %u " + "desired eden length by pause %u " + "desired eden length before mixed %u" + "desired eden length by default %u", + desired_young_length, survivor_length, + allocated_young_length, absolute_min_young_length, + absolute_max_young_length, desired_eden_length_by_mmu, + desired_eden_length_by_pause, + desired_eden_length_before_mixed, + MinDesiredEdenLength); + + assert(desired_young_length >= allocated_young_length, "must be"); + return desired_young_length; +} + +// Limit the desired (wished) young length by current free regions. If the request +// can be satisfied without using up reserve regions, do so, otherwise eat into +// the reserve, giving away at most what the heap sizer allows. +uint G1Policy::calculate_young_target_length(uint desired_young_length) const { + uint allocated_young_length = _g1h->young_regions_count(); + + uint receiving_additional_eden; + if (allocated_young_length >= desired_young_length) { + // Already used up all we actually want (may happen as G1 revises the + // young list length concurrently, or caused by gclocker). Do not allow more, + // potentially resulting in GC. + receiving_additional_eden = 0; + log_trace(gc, ergo, heap)("Young target length: Already used up desired young %u allocated %u", + desired_young_length, + allocated_young_length); + } else { + // Now look at how many free regions are there currently, and the heap reserve. + // We will try our best not to "eat" into the reserve as long as we can. 
If we + // do, we at most eat the sizer's minimum regions into the reserve or half the + // reserve rounded up (if possible; this is an arbitrary value). + + uint max_to_eat_into_reserve = MIN2(_young_gen_sizer->min_desired_young_length(), + (_reserve_regions + 1) / 2); + + log_trace(gc, ergo, heap)("Young target length: Common " + "free regions at end of collection %u " + "desired young length %u " + "reserve region %u " + "max to eat into reserve %u", + _free_regions_at_end_of_collection, + desired_young_length, + _reserve_regions, + max_to_eat_into_reserve); + + if (_free_regions_at_end_of_collection <= _reserve_regions) { + // Fully eat (or already eating) into the reserve, hand back at most absolute_min_length regions. + uint receiving_young = MIN3(_free_regions_at_end_of_collection, + desired_young_length, + max_to_eat_into_reserve); + // We could already have allocated more regions than what we could get + // above. + receiving_additional_eden = allocated_young_length < receiving_young ? + receiving_young - allocated_young_length : 0; + + log_trace(gc, ergo, heap)("Young target length: Fully eat into reserve " + "receiving young %u receiving additional eden %u", + receiving_young, + receiving_additional_eden); + } else if (_free_regions_at_end_of_collection < (desired_young_length + _reserve_regions)) { + // Partially eat into the reserve, at most max_to_eat_into_reserve regions. + uint free_outside_reserve = _free_regions_at_end_of_collection - _reserve_regions; + assert(free_outside_reserve < desired_young_length, + "must be %u %u", + free_outside_reserve, desired_young_length); + + uint receiving_within_reserve = MIN2(desired_young_length - free_outside_reserve, + max_to_eat_into_reserve); + uint receiving_young = free_outside_reserve + receiving_within_reserve; + // Again, we could have already allocated more than we could get. + receiving_additional_eden = allocated_young_length < receiving_young ? 
+ receiving_young - allocated_young_length : 0; + + log_trace(gc, ergo, heap)("Young target length: Partially eat into reserve " + "free outside reserve %u " + "receiving within reserve %u " + "receiving young %u " + "receiving additional eden %u", + free_outside_reserve, receiving_within_reserve, + receiving_young, receiving_additional_eden); + } else { + // No need to use the reserve. + receiving_additional_eden = desired_young_length - allocated_young_length; + log_trace(gc, ergo, heap)("Young target length: No need to use reserve " + "receiving additional eden %u", + receiving_additional_eden); + } } - assert(young_list_target_length > base_min_length, - "we should be able to allocate at least one eden region"); - assert(young_list_target_length >= absolute_min_length, "post-condition"); + uint target_young_length = allocated_young_length + receiving_additional_eden; - result.first = young_list_target_length; - return result; -} + assert(target_young_length >= allocated_young_length, "must be"); -uint G1Policy::calculate_young_list_target_length(size_t rs_length, - uint base_min_length, - uint desired_min_length, - uint desired_max_length) const { + log_trace(gc, ergo, heap)("Young target length: " + "young target length %u " + "allocated young length %u " + "received additional eden %u", + target_young_length, allocated_young_length, + receiving_additional_eden); + return target_young_length; +} + +uint G1Policy::calculate_desired_eden_length_by_pause(double base_time_ms, + uint min_eden_length, + uint max_eden_length) const { assert(use_adaptive_young_list_length(), "pre-condition"); - assert(collector_state()->in_young_only_phase(), "only call this for young GCs"); - // In case some edge-condition makes the desired max length too small... 
- if (desired_max_length <= desired_min_length) { - return desired_min_length; - } - - // We'll adjust min_young_length and max_young_length not to include - // the already allocated young regions (i.e., so they reflect the - // min and max eden regions we'll allocate). The base_min_length - // will be reflected in the predictions by the - // survivor_regions_evac_time prediction. - assert(desired_min_length > base_min_length, "invariant"); - uint min_young_length = desired_min_length - base_min_length; - assert(desired_max_length > base_min_length, "invariant"); - uint max_young_length = desired_max_length - base_min_length; - - const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; - const size_t pending_cards = _analytics->predict_pending_cards(); - const double base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length); - const uint available_free_regions = _free_regions_at_end_of_collection; - const uint base_free_regions = - available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0; + assert(min_eden_length <= max_eden_length, "must be %u %u", min_eden_length, max_eden_length); // Here, we will make sure that the shortest young length that // makes sense fits within the target pause time. G1YoungLengthPredictor p(base_time_ms, - base_free_regions, - target_pause_time_ms, + _free_regions_at_end_of_collection, + _mmu_tracker->max_gc_time() * 1000.0, this); - if (p.will_fit(min_young_length)) { + if (p.will_fit(min_eden_length)) { // The shortest young length will fit into the target pause time; // we'll now check whether the absolute maximum number of young // regions will fit in the target pause time. If not, we'll do // a binary search between min_young_length and max_young_length. - if (p.will_fit(max_young_length)) { + if (p.will_fit(max_eden_length)) { // The maximum young length will fit into the target pause time. 
// We are done so set min young length to the maximum length (as // the result is assumed to be returned in min_young_length). - min_young_length = max_young_length; + min_eden_length = max_eden_length; } else { // The maximum possible number of young regions will not fit within // the target pause time so we'll search for the optimal @@ -357,37 +448,56 @@ // does, it becomes the new min. If it doesn't, it becomes // the new max. This way we maintain the loop invariants. - assert(min_young_length < max_young_length, "invariant"); - uint diff = (max_young_length - min_young_length) / 2; + assert(min_eden_length < max_eden_length, "invariant"); + uint diff = (max_eden_length - min_eden_length) / 2; while (diff > 0) { - uint young_length = min_young_length + diff; - if (p.will_fit(young_length)) { - min_young_length = young_length; + uint eden_length = min_eden_length + diff; + if (p.will_fit(eden_length)) { + min_eden_length = eden_length; } else { - max_young_length = young_length; + max_eden_length = eden_length; } - assert(min_young_length < max_young_length, "invariant"); - diff = (max_young_length - min_young_length) / 2; + assert(min_eden_length < max_eden_length, "invariant"); + diff = (max_eden_length - min_eden_length) / 2; } // The results is min_young_length which, according to the // loop invariants, should fit within the target pause time. 
// These are the post-conditions of the binary search above: - assert(min_young_length < max_young_length, - "otherwise we should have discovered that max_young_length " + assert(min_eden_length < max_eden_length, + "otherwise we should have discovered that max_eden_length " "fits into the pause target and not done the binary search"); - assert(p.will_fit(min_young_length), - "min_young_length, the result of the binary search, should " + assert(p.will_fit(min_eden_length), + "min_eden_length, the result of the binary search, should " "fit into the pause target"); - assert(!p.will_fit(min_young_length + 1), - "min_young_length, the result of the binary search, should be " + assert(!p.will_fit(min_eden_length + 1), + "min_eden_length, the result of the binary search, should be " "optimal, so no larger length should fit into the pause target"); } } else { // Even the minimum length doesn't fit into the pause time // target, return it as the result nevertheless. } - return base_min_length + min_young_length; + return min_eden_length; +} + +uint G1Policy::calculate_desired_eden_length_before_mixed(double survivor_base_time_ms, + uint min_eden_length, + uint max_eden_length) const { + G1CollectionSetCandidates* candidates = _collection_set->candidates(); + + uint min_old_regions_end = MIN2(candidates->cur_idx() + calc_min_old_cset_length(), candidates->num_regions()); + double predicted_region_evac_time_ms = survivor_base_time_ms; + for (uint i = candidates->cur_idx(); i < min_old_regions_end; i++) { + HeapRegion* r = candidates->at(i); + predicted_region_evac_time_ms += predict_region_total_time_ms(r, false); + } + uint desired_eden_length_by_min_cset_length = + calculate_desired_eden_length_by_pause(predicted_region_evac_time_ms, + min_eden_length, + max_eden_length); + + return desired_eden_length_by_min_cset_length; } double G1Policy::predict_survivor_regions_evac_time() const { @@ -409,7 +519,7 @@ size_t rs_length_prediction = rs_length * 1100 / 1000; 
update_rs_length_prediction(rs_length_prediction); - update_young_list_max_and_target_length(rs_length_prediction); + update_young_max_and_target_length(rs_length_prediction); } } @@ -457,7 +567,7 @@ _free_regions_at_end_of_collection = _g1h->num_free_regions(); _survivor_surv_rate_group->reset(); - update_young_list_max_and_target_length(); + update_young_max_and_target_length(); update_rs_length_prediction(); _old_gen_alloc_tracker.reset_after_full_gc(); @@ -669,10 +779,8 @@ double alloc_rate_ms = (double) regions_allocated / app_time_ms; _analytics->report_alloc_rate_ms(alloc_rate_ms); - double interval_ms = - (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0; + _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms); _analytics->update_recent_gc_times(end_time_sec, pause_time_ms); - _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms); } if (collector_state()->in_young_gc_before_mixed()) { @@ -793,13 +901,14 @@ // restrained by the heap reserve. Using the actual length would make the // prediction too small and the limit the young gen every time we get to the // predicted target occupancy. - size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); + size_t last_unrestrained_young_length = update_young_max_and_target_length(); _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0); update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(), _old_gen_alloc_tracker.last_cycle_old_bytes(), last_unrestrained_young_length * HeapRegion::GrainBytes, - this_pause_was_young_only); + this_pause_was_young_only, + this_pause_included_initial_mark); _ihop_control->send_trace_event(_g1h->gc_tracer_stw()); } else { @@ -849,7 +958,8 @@ void G1Policy::update_ihop_prediction(double mutator_time_s, size_t mutator_alloc_bytes, size_t young_gen_size, - bool this_gc_was_young_only) { + bool this_gc_was_young_only, + bool this_gc_included_initial_mark) { // Always try to update IHOP prediction. 
Even evacuation failures give information // about e.g. whether to start IHOP earlier next time. @@ -876,7 +986,10 @@ // marking, which makes any prediction useless. This increases the accuracy of the // prediction. if (this_gc_was_young_only && mutator_time_s > min_valid_time) { - _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); + _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes); + if (this_gc_included_initial_mark) { + _ihop_control->update_additional_buffer(young_gen_size); + } report = true; } @@ -1203,8 +1316,10 @@ const char* false_action_str) const { G1CollectionSetCandidates* candidates = _collection_set->candidates(); - if (candidates->is_empty()) { - log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); + if (candidates == NULL || candidates->is_empty()) { + if (false_action_str != NULL) { + log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); + } return false; } @@ -1213,12 +1328,16 @@ double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes); double threshold = (double) G1HeapWastePercent; if (reclaimable_percent <= threshold) { - log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + if (false_action_str != NULL) { + log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, + false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + } return false; } - log_debug(gc, ergo)("%s (candidate old regions available). 
candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + if (true_action_str != NULL) { + log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, + true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + } return true; } --- old/src/hotspot/share/gc/g1/g1Policy.hpp 2020-06-10 10:12:06.937112094 +0200 +++ new/src/hotspot/share/gc/g1/g1Policy.hpp 2020-06-10 10:12:06.845108933 +0200 @@ -61,7 +61,8 @@ void update_ihop_prediction(double mutator_time_s, size_t mutator_alloc_bytes, size_t young_gen_size, - bool this_gc_was_young_only); + bool this_gc_was_young_only, + bool this_gc_included_initial_mark); void report_ihop_statistics(); G1Predictions _predictor; @@ -77,7 +78,6 @@ jlong _collection_pause_end_millis; uint _young_list_target_length; - uint _young_list_fixed_length; // The max number of regions we can extend the eden by while the GC // locker is active. This should be >= _young_list_target_length; @@ -170,6 +170,10 @@ private: G1CollectionSet* _collection_set; + + bool next_gc_should_be_mixed(const char* true_action_str, + const char* false_action_str) const; + double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const; double other_time_ms(double pause_time_ms) const; @@ -189,44 +193,42 @@ double _mark_remark_start_sec; double _mark_cleanup_start_sec; - // Updates the internal young list maximum and target lengths. Returns the - // unbounded young list target length. If no rs_length parameter is passed, + // Updates the internal young gen maximum and target lengths. Returns the + // unbounded young target length. If no rs_length parameter is passed, // predict the RS length using the prediction model, otherwise use the // given rs_length as the prediction. 
- uint update_young_list_max_and_target_length(); - uint update_young_list_max_and_target_length(size_t rs_length); + uint update_young_max_and_target_length(); + uint update_young_max_and_target_length(size_t rs_length); // Update the young list target length either by setting it to the // desired fixed value or by calculating it using G1's pause // prediction model. // Returns the unbounded young list target length. - uint update_young_list_target_length(size_t rs_length); + uint update_young_target_length(size_t rs_length); - // Calculate and return the minimum desired young list target - // length. This is the minimum desired young list length according - // to the user's inputs. - uint calculate_young_list_desired_min_length(uint base_min_length) const; - - // Calculate and return the maximum desired young list target - // length. This is the maximum desired young list length according - // to the user's inputs. - uint calculate_young_list_desired_max_length() const; - - // Calculate and return the maximum young list target length that - // can fit into the pause time goal. The parameters are: rs_length - // represent the prediction of how large the young RSet lengths will - // be, base_min_length is the already existing number of regions in - // the young list, min_length and max_length are the desired min and - // max young list length according to the user's inputs. - uint calculate_young_list_target_length(size_t rs_length, - uint base_min_length, - uint desired_min_length, - uint desired_max_length) const; - - // Result of the bounded_young_list_target_length() method, containing both the - // bounded as well as the unbounded young list target lengths in this order. - typedef Pair YoungTargetLengths; - YoungTargetLengths young_list_target_lengths(size_t rs_length) const; + // Calculate and return the minimum desired eden length based on the MMU target. 
+  uint calculate_desired_eden_length_by_mmu() const;
+
+  // Calculate and return the desired eden length that can fit into the pause time goal.
+  // The parameters are: base_time_ms is the predicted base time of the pause,
+  // min_eden_length and max_eden_length are the bounds
+  // (inclusive) within which eden can grow.
+  uint calculate_desired_eden_length_by_pause(double base_time_ms,
+                                              uint min_eden_length,
+                                              uint max_eden_length) const;
+
+  // Calculates the desired eden length before mixed gc so that after adding the
+  // minimum amount of old gen regions from the collection set, the eden fits into
+  // the pause time goal.
+  uint calculate_desired_eden_length_before_mixed(double survivor_base_time_ms,
+                                                  uint min_eden_length,
+                                                  uint max_eden_length) const;
+
+  // Calculate desired young length based on current situation without taking actually
+  // available free regions into account.
+  uint calculate_young_desired_length(size_t rs_length) const;
+  // Limit the given desired young length to available free regions.
+  uint calculate_young_target_length(uint desired_young_length) const;
 
   void update_rs_length_prediction();
   void update_rs_length_prediction(size_t prediction);
 
@@ -335,9 +337,6 @@
 
   void print_phases();
 
-  bool next_gc_should_be_mixed(const char* true_action_str,
-                               const char* false_action_str) const;
-
   // Calculate and return the number of initial and optional old gen regions from
   // the given collection set candidates and the remaining time.
void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates, --- old/src/hotspot/share/gc/g1/g1_globals.hpp 2020-06-10 10:12:07.385127481 +0200 +++ new/src/hotspot/share/gc/g1/g1_globals.hpp 2020-06-10 10:12:07.289124183 +0200 @@ -104,8 +104,16 @@ "specifies that mutator threads should not do such filtering.") \ range(0, 100) \ \ - experimental(intx, G1ExpandByPercentOfAvailable, 20, \ - "When expanding, % of uncommitted space to claim.") \ + experimental(size_t, G1ExpandByPercentOfAvailable, 20, \ + "When expanding, maximum % of uncommitted space to claim.") \ + range(0, 100) \ + \ + experimental(size_t, G1ShrinkByPercentOfAvailable, 50, \ + "When shrinking, maximum % of free space to claim.") \ + range(0, 100) \ + \ + experimental(size_t, G1MinimumPercentOfGCTimeRatio, 50, \ + "Percentage of GCTimeRatio G1 will try to avoid going below.") \ range(0, 100) \ \ product(size_t, G1UpdateBufferSize, 256, \ --- old/src/hotspot/share/utilities/numberSeq.cpp 2020-06-10 10:12:07.821142457 +0200 +++ new/src/hotspot/share/utilities/numberSeq.cpp 2020-06-10 10:12:07.729139298 +0200 @@ -28,11 +28,18 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/numberSeq.hpp" -AbsSeq::AbsSeq(double alpha) : - _num(0), _sum(0.0), _sum_of_squares(0.0), +AbsSeq::AbsSeq(double alpha) : _num(0), _sum(0.0), _sum_of_squares(0.0), _davg(0.0), _dvariance(0.0), _alpha(alpha) { } +void AbsSeq::reset() { + _num = 0; + _sum = 0.0; + _sum_of_squares = 0.0; + _davg = 0.0; + _dvariance = 0.0; +} + void AbsSeq::add(double val) { if (_num == 0) { // if the sequence is empty, the davg is the same as the value @@ -131,11 +138,10 @@ } -TruncatedSeq::TruncatedSeq(int length, double alpha): +TruncatedSeq::TruncatedSeq(uint length, double alpha): AbsSeq(alpha), _length(length), _next(0) { _sequence = NEW_C_HEAP_ARRAY(double, _length, mtInternal); - for (int i = 0; i < _length; ++i) - _sequence[i] = 0.0; + TruncatedSeq::reset(); } TruncatedSeq::~TruncatedSeq() { @@ -171,7 +177,7 
@@ if (_num == 0) return 0.0; double ret = _sequence[0]; - for (int i = 1; i < _num; ++i) { + for (uint i = 1; i < _num; ++i) { double val = _sequence[i]; if (val > ret) ret = val; @@ -211,7 +217,7 @@ double y_avg = 0.0; int first = (_next + _length - _num) % _length; - for (int i = 0; i < _num; ++i) { + for (uint i = 0; i < _num; ++i) { double x = (double) i; double y = _sequence[(first + i) % _length]; @@ -231,6 +237,13 @@ return b0 + b1 * num; } +void TruncatedSeq::reset() { + AbsSeq::reset(); + for (uint i = 0; i < _length; ++i) { + _sequence[i] = 0.0; + } + _next = 0; +} // Printing/Debugging Support @@ -251,7 +264,7 @@ void TruncatedSeq::dump_on(outputStream* s) { AbsSeq::dump_on(s); s->print_cr("\t\t _length = %d, _next = %d", _length, _next); - for (int i = 0; i < _length; i++) { + for (uint i = 0; i < _length; i++) { if (i%5 == 0) { s->cr(); s->print("\t"); --- old/src/hotspot/share/utilities/numberSeq.hpp 2020-06-10 10:12:08.257157436 +0200 +++ new/src/hotspot/share/utilities/numberSeq.hpp 2020-06-10 10:12:08.165154275 +0200 @@ -47,7 +47,7 @@ void init(double alpha); protected: - int _num; // the number of elements in the sequence + uint _num; // the number of elements in the sequence double _sum; // the sum of the elements in the sequence double _sum_of_squares; // the sum of squares of the elements in the sequence @@ -62,13 +62,14 @@ public: AbsSeq(double alpha = DEFAULT_ALPHA_VALUE); + virtual void reset(); virtual void add(double val); // adds a new element to the sequence void add(unsigned val) { add((double) val); } virtual double maximum() const = 0; // maximum element in the sequence virtual double last() const = 0; // last element added in the sequence // the number of elements in the sequence - int num() const { return _num; } + uint num() const { return _num; } // the sum of the elements in the sequence double sum() const { return _sum; } @@ -112,12 +113,12 @@ void init(); protected: double *_sequence; // buffers the last L elements in the 
sequence - int _length; // this is L + uint _length; // this is L int _next; // oldest slot in the array, i.e. next to be overwritten public: // accepts a value for L - TruncatedSeq(int length = DefaultSeqLength, + TruncatedSeq(uint length = DefaultSeqLength, double alpha = DEFAULT_ALPHA_VALUE); ~TruncatedSeq(); virtual void add(double val); @@ -127,6 +128,8 @@ double oldest() const; // the oldest valid value in the sequence double predict_next() const; // prediction based on linear regression + virtual void reset(); + bool is_full() const { return _length == _num; } // Debugging/Printing virtual void dump_on(outputStream* s); }; --- old/test/hotspot/gtest/gc/g1/test_g1IHOPControl.cpp 2020-06-10 10:12:08.697172551 +0200 +++ new/test/hotspot/gtest/gc/g1/test_g1IHOPControl.cpp 2020-06-10 10:12:08.609169527 +0200 @@ -31,7 +31,8 @@ size_t alloc_amount, size_t young_size, double mark_time) { for (int i = 0; i < 100; i++) { - ctrl->update_allocation_info(alloc_time, alloc_amount, young_size); + ctrl->update_allocation_info(alloc_time, alloc_amount); + ctrl->update_additional_buffer(young_size); ctrl->update_marking_length(mark_time); } } @@ -51,7 +52,8 @@ size_t threshold = ctrl.get_conc_mark_start_threshold(); EXPECT_EQ(initial_ihop, threshold); - ctrl.update_allocation_info(100.0, 100, 100); + ctrl.update_allocation_info(100.0, 100); + ctrl.update_additional_buffer(100); threshold = ctrl.get_conc_mark_start_threshold(); EXPECT_EQ(initial_ihop, threshold); @@ -102,7 +104,8 @@ EXPECT_EQ(initial_threshold, threshold); for (size_t i = 0; i < G1AdaptiveIHOPNumInitialSamples - 1; i++) { - ctrl.update_allocation_info(alloc_time1, alloc_amount1, young_size); + ctrl.update_allocation_info(alloc_time1, alloc_amount1); + ctrl.update_additional_buffer(young_size); ctrl.update_marking_length(marking_time1); // Not enough data yet. 
threshold = ctrl.get_conc_mark_start_threshold(); --- old/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java 2020-06-10 10:12:09.129187393 +0200 +++ new/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java 2020-06-10 10:12:09.033184095 +0200 @@ -138,7 +138,7 @@ new LogMessageWithLevel("String Deduplication", Level.DEBUG), new LogMessageWithLevel("Queue Fixup", Level.DEBUG), new LogMessageWithLevel("Table Fixup", Level.DEBUG), - new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG), + new LogMessageWithLevel("Resize Heap After Collection", Level.DEBUG), new LogMessageWithLevel("Region Register", Level.DEBUG), new LogMessageWithLevel("Prepare Heap Roots", Level.DEBUG), new LogMessageWithLevel("Concatenate Dirty Card Logs", Level.DEBUG),