Print this page
G1: Use SoftMaxHeapSize to guide GC heuristics
Split |
Close |
Expand all |
Collapse all |
--- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
+++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
1 1 /*
2 2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc/g1/g1CollectedHeap.hpp"
27 27 #include "gc/g1/g1HeapSizingPolicy.hpp"
28 28 #include "gc/g1/g1Analytics.hpp"
29 +#include "gc/g1/g1Policy.hpp"
29 30 #include "logging/log.hpp"
30 31 #include "runtime/globals.hpp"
31 32 #include "utilities/debug.hpp"
32 33 #include "utilities/globalDefinitions.hpp"
33 34
34 35 G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1h, const G1Analytics* analytics) {
35 36 return new G1HeapSizingPolicy(g1h, analytics);
36 37 }
37 38
// Constructor. The heuristics window length is taken from the analytics'
// recorded pause-time history. _minimum_desired_bytes_after_last_cm starts
// at MinHeapSize and is updated after each concurrent mark cycle
// (see expansion_amount_after_concurrent_mark()).
G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
  _g1h(g1h),
  _analytics(analytics),
  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()),
  _minimum_desired_bytes_after_last_cm(MinHeapSize) {

  // The growth trigger counts over-threshold pauses within one history
  // window, so it must be strictly smaller than the window itself.
  assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
  clear_ratio_check_data();
}
46 48
47 49 void G1HeapSizingPolicy::clear_ratio_check_data() {
48 50 _ratio_over_threshold_count = 0;
49 51 _ratio_over_threshold_sum = 0.0;
50 52 _pauses_since_start = 0;
51 53 }
52 54
// Decides how many bytes (possibly zero) to expand the heap by after a young
// collection, based on how recent GC time ratios compare to the overhead
// implied by GCTimeRatio. Mutates the ratio-check window state
// (_ratio_over_threshold_count/_sum, _pauses_since_start) as a side effect.
size_t G1HeapSizingPolicy::expansion_amount_after_young_collection() {
  double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  // Target GC overhead percentage derived from GCTimeRatio.
  const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  double threshold = gc_overhead_percent;
  size_t expand_bytes = 0;

  // If the heap is at less than half its maximum size, scale the threshold down,
  // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
  // though the scaling code will likely keep the increase small.
  if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
    threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
    threshold = MAX2(threshold, 1.0);
  }

  // If the last GC time ratio is over the threshold, increment the count of
  // times it has been exceeded, and add this ratio to the sum of exceeded
  // ratios.
  if (last_gc_overhead > threshold) {
    _ratio_over_threshold_count++;
    _ratio_over_threshold_sum += last_gc_overhead;
  }

  // Check if we've had enough GC time ratio checks that were over the
  // threshold to trigger an expansion. We'll also expand if we've
  // reached the end of the history buffer and the average of all entries
  // is still over the threshold. This indicates a smaller number of GCs were
  // long enough to make the average exceed the threshold.
  bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
      (filled_history_buffer && (recent_gc_overhead > threshold))) {
    size_t min_expand_bytes = HeapRegion::GrainBytes;
    size_t reserved_bytes = _g1h->max_capacity();
    size_t committed_bytes = _g1h->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    double scale_factor = 1.0;

    // If the current size is less than 1/4 of the Initial heap size, expand
    // by half of the delta between the current and Initial sizes. IE, grow
    // back quickly.
    //
    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
    // the available expansion space, whichever is smaller, as the base
    // expansion size. Then possibly scale this size according to how much the
    // threshold has (on average) been exceeded by. If the delta is small
    // (less than the StartScaleDownAt value), scale the size down linearly, but
    // not by less than MinScaleDownFactor. If the delta is large (greater than
    // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
    // times the base size. The scaling will be linear in the range from
    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
    // ScaleUpRange sets the rate of scaling up.
    if (committed_bytes < InitialHeapSize / 4) {
      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
    } else {
      double const MinScaleDownFactor = 0.2;
      double const MaxScaleUpFactor = 2;
      double const StartScaleDownAt = gc_overhead_percent;
      double const StartScaleUpAt = gc_overhead_percent * 1.5;
      double const ScaleUpRange = gc_overhead_percent * 2.0;

      double ratio_delta;
      if (filled_history_buffer) {
        ratio_delta = recent_gc_overhead - threshold;
      } else {
        ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
      }

      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
      if (ratio_delta < StartScaleDownAt) {
        scale_factor = ratio_delta / StartScaleDownAt;
        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
      } else if (ratio_delta > StartScaleUpAt) {
        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
      }
    }

    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

    // Ensure the expansion size is at least the minimum growth amount
    // and at most the remaining uncommitted byte size.
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    // Expansion was triggered; restart the ratio-check window.
    clear_ratio_check_data();
  } else {
    // An expansion was not triggered. If we've started counting, increment
    // the number of checks we've made in the current window. If we've
    // reached the end of the window without resizing, clear the counters to
    // start again the next time we see a ratio above the threshold.
    if (_ratio_over_threshold_count > 0) {
      _pauses_since_start++;
      if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
        clear_ratio_check_data();
      }
    }
  }

  return expand_bytes;
}
165 +
166 +size_t G1HeapSizingPolicy::target_heap_capacity(size_t used_bytes, uintx free_ratio) {
167 + const double free_percentage = (double) free_ratio / 100.0;
168 + const double used_percentage = 1.0 - free_percentage;
169 +
170 + // We have to be careful here as these two calculations can overflow
171 + // 32-bit size_t's.
172 + double used_bytes_d = (double) used_bytes;
173 + double desired_capacity_d = used_bytes_d / used_percentage;
174 + // Let's make sure that they are both under the max heap size, which
175 + // by default will make it fit into a size_t.
176 + double desired_capacity_upper_bound = (double) MaxHeapSize;
177 + desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound);
178 + // We can now safely turn it into size_t's.
179 + return (size_t) desired_capacity_d;
180 +}
181 +
182 +size_t G1HeapSizingPolicy::expansion_amount_after_concurrent_mark() {
183 + size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
184 +
185 + size_t minimum_desired_capacity = target_heap_capacity(cur_used_bytes, MinHeapFreeRatio);
186 +
187 + _minimum_desired_bytes_after_last_cm = _g1h->policy()->minimum_desired_bytes_after_concurrent_mark(cur_used_bytes);
188 + // Use the smaller one between minimum_desired_capacity
189 + // and predicted minimum_desired_bytes_after_concurrent_mark
190 + // We still use minimum_desired_capacity because minimum_desired_bytes_after_concurrent_mark
191 + // might include a lot of new allocated humongous objects
192 + _minimum_desired_bytes_after_last_cm = MIN2(_minimum_desired_bytes_after_last_cm, minimum_desired_capacity);
193 +
194 + return _minimum_desired_bytes_after_last_cm > _g1h->capacity() ?
195 + _minimum_desired_bytes_after_last_cm - _g1h->capacity() : 0;
196 +}
197 +
198 +size_t G1HeapSizingPolicy::shrink_amount_after_mixed_collections() {
199 + size_t shrink_bytes = 0;
200 + const size_t capacity_after_gc = _g1h->capacity();
201 + const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes();
202 + size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
203 + // soft_max_capacity can be smaller
204 + maximum_desired_capacity = MIN2(maximum_desired_capacity, _g1h->soft_max_capacity());
205 + // Make sure not less than _minimum_desired_bytes_after_last_cm
206 + maximum_desired_capacity = MAX2(maximum_desired_capacity, _minimum_desired_bytes_after_last_cm);
207 +
208 + if (capacity_after_gc > maximum_desired_capacity) {
209 + shrink_bytes = capacity_after_gc - maximum_desired_capacity;
210 + }
211 +
212 + return shrink_bytes;
162 213 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX