1 /* 2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Policy.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// Factory method; the caller takes ownership of the returned policy object.
G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1h, const G1Analytics* analytics) {
  return new G1HeapSizingPolicy(g1h, analytics);
}

G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
  _g1h(g1h),
  _analytics(analytics),
  // Length of the observation window for the expansion heuristics; taken from
  // the analytics' recorded pause-time history.
  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()),
  _minimum_desired_bytes_after_last_cm(MinHeapSize) {

  // The growth trigger count must fit inside the history window, otherwise the
  // "count reached" expansion condition could never fire before the window ends.
  assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
  clear_ratio_check_data();
}

// Reset the state tracking how often recent GC overhead exceeded the
// threshold. Called after an expansion has been triggered, or when the
// observation window elapsed without triggering one.
void G1HeapSizingPolicy::clear_ratio_check_data() {
  _ratio_over_threshold_count = 0;
  _ratio_over_threshold_sum = 0.0;
  _pauses_since_start = 0;
}

// Compute how many bytes (possibly 0) to expand the heap by after a young
// collection. Expansion is considered when GC overhead (pause time as a
// percentage of total time) has exceeded the target derived from GCTimeRatio,
// either often enough in the recent past or on average over the whole
// history window. The returned amount is clamped to the range
// [one heap region, remaining uncommitted bytes].
size_t G1HeapSizingPolicy::expansion_amount_after_young_collection() {
  double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  // Target maximum GC overhead in percent implied by GCTimeRatio
  // (e.g. GCTimeRatio == 9 yields a 10% target).
  const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  double threshold = gc_overhead_percent;
  size_t expand_bytes = 0;

  // If the heap is at less than half its maximum size, scale the threshold down,
  // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
  // though the scaling code will likely keep the increase small.
  if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
    threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
    threshold = MAX2(threshold, 1.0);
  }

  // If the last GC time ratio is over the threshold, increment the count of
  // times it has been exceeded, and add this ratio to the sum of exceeded
  // ratios.
  if (last_gc_overhead > threshold) {
    _ratio_over_threshold_count++;
    _ratio_over_threshold_sum += last_gc_overhead;
  }

  // Check if we've had enough GC time ratio checks that were over the
  // threshold to trigger an expansion. We'll also expand if we've
  // reached the end of the history buffer and the average of all entries
  // is still over the threshold. This indicates a smaller number of GCs were
  // long enough to make the average exceed the threshold.
  bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
      (filled_history_buffer && (recent_gc_overhead > threshold))) {
    size_t min_expand_bytes = HeapRegion::GrainBytes;
    size_t reserved_bytes = _g1h->max_capacity();
    size_t committed_bytes = _g1h->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    double scale_factor = 1.0;

    // If the current size is less than 1/4 of the Initial heap size, expand
    // by half of the delta between the current and Initial sizes. IE, grow
    // back quickly.
    //
    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
    // the available expansion space, whichever is smaller, as the base
    // expansion size. Then possibly scale this size according to how much the
    // threshold has (on average) been exceeded by. If the delta is small
    // (less than the StartScaleDownAt value), scale the size down linearly, but
    // not by less than MinScaleDownFactor. If the delta is large (greater than
    // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
    // times the base size. The scaling will be linear in the range from
    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
    // ScaleUpRange sets the rate of scaling up.
    if (committed_bytes < InitialHeapSize / 4) {
      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
    } else {
      double const MinScaleDownFactor = 0.2;
      double const MaxScaleUpFactor = 2;
      // Note: the scale breakpoints are based on the unscaled target overhead
      // (gc_overhead_percent), not the possibly-reduced threshold above.
      double const StartScaleDownAt = gc_overhead_percent;
      double const StartScaleUpAt = gc_overhead_percent * 1.5;
      double const ScaleUpRange = gc_overhead_percent * 2.0;

      // How far (on average) the observed overhead exceeded the threshold.
      double ratio_delta;
      if (filled_history_buffer) {
        ratio_delta = recent_gc_overhead - threshold;
      } else {
        ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
      }

      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
      if (ratio_delta < StartScaleDownAt) {
        scale_factor = ratio_delta / StartScaleDownAt;
        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
      } else if (ratio_delta > StartScaleUpAt) {
        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
      }
    }

    // The log intentionally reports the base (pre-scaling) expansion amount
    // together with the scale factor that will be applied to it.
    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

    // Ensure the expansion size is at least the minimum growth amount
    // and at most the remaining uncommitted byte size.
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    clear_ratio_check_data();
  } else {
    // An expansion was not triggered. If we've started counting, increment
    // the number of checks we've made in the current window. If we've
    // reached the end of the window without resizing, clear the counters to
    // start again the next time we see a ratio above the threshold.
    if (_ratio_over_threshold_count > 0) {
      _pauses_since_start++;
      if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
        clear_ratio_check_data();
      }
    }
  }

  return expand_bytes;
}

// Compute how many bytes (possibly 0) to expand the heap by after a
// concurrent mark cycle: the shortfall between the current committed
// capacity and the minimum desired capacity the policy derives from the
// bytes still in use by non-young regions. Also records that minimum for
// later use as a lower bound when shrinking.
size_t G1HeapSizingPolicy::expansion_amount_after_concurrent_mark() {
  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
  _minimum_desired_bytes_after_last_cm = _g1h->policy()->minimum_desired_bytes_after_concurrent_mark(cur_used_bytes);

  return _minimum_desired_bytes_after_last_cm > _g1h->capacity() ?
            _minimum_desired_bytes_after_last_cm - _g1h->capacity() : 0;
}

// Compute how many bytes (possibly 0) the heap can be shrunk by once mixed
// collections are done, honoring MaxHeapFreeRatio, the soft maximum heap
// size, and the minimum capacity determined after the last concurrent mark.
size_t G1HeapSizingPolicy::shrink_amount_after_mixed_collections() {
  size_t shrink_bytes = 0;
  const size_t capacity_after_gc = _g1h->capacity();
  const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes();

  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  double used_after_gc_d = (double) used_after_gc;
  double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
  // Let's make sure that they are both under the max heap size, which
  // by default will make them fit into a size_t.
  double desired_capacity_upper_bound = (double) MaxHeapSize;
  maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
                                    desired_capacity_upper_bound);
  // We can now safely turn them into size_t's.
  size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;

  // soft_max_capacity can be smaller
  maximum_desired_capacity = MIN2(maximum_desired_capacity, _g1h->soft_max_capacity());

  // Make sure not less than _minimum_desired_bytes_after_last_cm
  maximum_desired_capacity = MAX2(maximum_desired_capacity, _minimum_desired_bytes_after_last_cm);

  if (capacity_after_gc > maximum_desired_capacity) {
    shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  }

  return shrink_bytes;
}