Print this page
8236073: G1: Use SoftMaxHeapSize to guide GC heuristics
Split |
Close |
Expand all |
Collapse all |
--- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
+++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
1 1 /*
2 2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc/g1/g1CollectedHeap.hpp"
27 27 #include "gc/g1/g1HeapSizingPolicy.hpp"
28 28 #include "gc/g1/g1Analytics.hpp"
29 +#include "gc/g1/g1Policy.hpp"
29 30 #include "logging/log.hpp"
30 31 #include "runtime/globals.hpp"
31 32 #include "utilities/debug.hpp"
32 33 #include "utilities/globalDefinitions.hpp"
33 34
// Factory method: builds the heap sizing policy for the given heap and
// its pause-time analytics. Caller owns the returned object.
G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1h, const G1Analytics* analytics) {
  return new G1HeapSizingPolicy(g1h, analytics);
}
37 38
// Initializes the policy with the heap and analytics it consults, and sizes
// the heuristics window to the analytics' recorded pause-time history length.
G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
  _g1h(g1h),
  _analytics(analytics),
  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {

  // The growth trigger must be reachable before the history window fills,
  // otherwise the count-based expansion path could never fire.
  assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
  clear_ratio_check_data();
}
46 47
47 48 void G1HeapSizingPolicy::clear_ratio_check_data() {
48 49 _ratio_over_threshold_count = 0;
49 50 _ratio_over_threshold_sum = 0.0;
50 51 _pauses_since_start = 0;
51 52 }
52 53
53 54 size_t G1HeapSizingPolicy::expansion_amount() {
54 55 double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
55 56 double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
56 57 assert(GCTimeRatio > 0,
57 58 "we should have set it to a default value set_g1_gc_flags() "
58 59 "if a user set it to 0");
59 60 const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));
60 61
61 62 double threshold = gc_overhead_percent;
62 63 size_t expand_bytes = 0;
63 64
64 65 // If the heap is at less than half its maximum size, scale the threshold down,
65 66 // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
66 67 // though the scaling code will likely keep the increase small.
67 68 if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
68 69 threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
69 70 threshold = MAX2(threshold, 1.0);
70 71 }
71 72
72 73 // If the last GC time ratio is over the threshold, increment the count of
73 74 // times it has been exceeded, and add this ratio to the sum of exceeded
74 75 // ratios.
75 76 if (last_gc_overhead > threshold) {
76 77 _ratio_over_threshold_count++;
77 78 _ratio_over_threshold_sum += last_gc_overhead;
78 79 }
79 80
80 81 // Check if we've had enough GC time ratio checks that were over the
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
81 82 // threshold to trigger an expansion. We'll also expand if we've
82 83 // reached the end of the history buffer and the average of all entries
83 84 // is still over the threshold. This indicates a smaller number of GCs were
84 85 // long enough to make the average exceed the threshold.
85 86 bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
86 87 if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
87 88 (filled_history_buffer && (recent_gc_overhead > threshold))) {
88 89 size_t min_expand_bytes = HeapRegion::GrainBytes;
89 90 size_t reserved_bytes = _g1h->max_capacity();
90 91 size_t committed_bytes = _g1h->capacity();
92 + if (committed_bytes <= SoftMaxHeapSize) {
93 + // Use SoftMaxHeapSize to limit the max size
94 + reserved_bytes = SoftMaxHeapSize;
95 + }
91 96 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
92 97 size_t expand_bytes_via_pct =
93 98 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
94 99 double scale_factor = 1.0;
95 100
96 101 // If the current size is less than 1/4 of the Initial heap size, expand
97 102 // by half of the delta between the current and Initial sizes. IE, grow
98 103 // back quickly.
99 104 //
100 105 // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
101 106 // the available expansion space, whichever is smaller, as the base
102 107 // expansion size. Then possibly scale this size according to how much the
103 108 // threshold has (on average) been exceeded by. If the delta is small
104 109 // (less than the StartScaleDownAt value), scale the size down linearly, but
105 110 // not by less than MinScaleDownFactor. If the delta is large (greater than
106 111 // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
107 112 // times the base size. The scaling will be linear in the range from
108 113 // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
109 114 // ScaleUpRange sets the rate of scaling up.
110 115 if (committed_bytes < InitialHeapSize / 4) {
111 116 expand_bytes = (InitialHeapSize - committed_bytes) / 2;
112 117 } else {
113 118 double const MinScaleDownFactor = 0.2;
114 119 double const MaxScaleUpFactor = 2;
115 120 double const StartScaleDownAt = gc_overhead_percent;
116 121 double const StartScaleUpAt = gc_overhead_percent * 1.5;
117 122 double const ScaleUpRange = gc_overhead_percent * 2.0;
118 123
119 124 double ratio_delta;
120 125 if (filled_history_buffer) {
121 126 ratio_delta = recent_gc_overhead - threshold;
122 127 } else {
123 128 ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
124 129 }
125 130
126 131 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
127 132 if (ratio_delta < StartScaleDownAt) {
128 133 scale_factor = ratio_delta / StartScaleDownAt;
129 134 scale_factor = MAX2(scale_factor, MinScaleDownFactor);
130 135 } else if (ratio_delta > StartScaleUpAt) {
131 136 scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
132 137 scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
133 138 }
134 139 }
135 140
136 141 log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
137 142 "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
138 143 recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
139 144
140 145 expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
141 146
142 147 // Ensure the expansion size is at least the minimum growth amount
143 148 // and at most the remaining uncommitted byte size.
144 149 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
145 150 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
146 151
147 152 clear_ratio_check_data();
148 153 } else {
149 154 // An expansion was not triggered. If we've started counting, increment
150 155 // the number of checks we've made in the current window. If we've
151 156 // reached the end of the window without resizing, clear the counters to
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
152 157 // start again the next time we see a ratio above the threshold.
153 158 if (_ratio_over_threshold_count > 0) {
154 159 _pauses_since_start++;
155 160 if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
156 161 clear_ratio_check_data();
157 162 }
158 163 }
159 164 }
160 165
161 166 return expand_bytes;
167 +}
168 +
169 +bool G1HeapSizingPolicy::can_shrink_heap_size_to(size_t heap_size) {
170 + size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
171 + uint used_regions = cur_used_bytes / HeapRegion::GrainBytes;
172 + uint new_number_of_regions = heap_size / HeapRegion::GrainBytes;
173 + // re-calculate the necessary reserve
174 + double reserve_regions_d = (double) new_number_of_regions * _g1h->policy()->_reserve_factor;
175 + // We use ceiling so that if reserve_regions_d is > 0.0 (but
176 + // smaller than 1.0) we'll get 1.
177 + uint reserve_regions = (uint) ceil(reserve_regions_d);
178 + if (new_number_of_regions <= (reserve_regions + used_regions)) {
179 + // No left for young generation
180 + return false;
181 + }
182 + // Rest region number for young gen
183 + uint young_regions = new_number_of_regions - reserve_regions - used_regions;
184 +
185 + // re-calculate the young length
186 + uint min_young_length;
187 + uint max_young_length;
188 + _g1h->policy()->_young_gen_sizer->recalculate_min_max_young_length(new_number_of_regions,
189 + &min_young_length,
190 + &max_young_length);
191 + // Rest young region length must be larger than min young length
192 + return young_regions >= max_young_length;
162 193 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX