/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

// ...

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around; clamp new_value to the largest aligned value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}
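
// The two functions above make lock-free, single-attempt updates to
// _capacity_until_GC. Below is a minimal standalone sketch of the same
// compare-and-swap pattern, using std::atomic in place of HotSpot's
// Atomic/OrderAccess wrappers; g_capacity, kCommitAlign and try_grow are
// illustrative names, not HotSpot identifiers.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static std::atomic<size_t> g_capacity{16 * 1024 * 1024};  // models _capacity_until_GC
static const size_t kCommitAlign = 64 * 1024;             // models commit_alignment()

// One CAS attempt, as in inc_capacity_until_GC(): a losing thread returns
// false and leaves the retry decision to its caller.
static bool try_grow(size_t delta, size_t* new_cap, size_t* old_cap) {
  size_t old_value = g_capacity.load(std::memory_order_acquire);
  size_t new_value = old_value + delta;
  if (new_value < old_value) {
    // The addition wrapped around; clamp to the largest aligned value.
    new_value = SIZE_MAX - (SIZE_MAX % kCommitAlign);
  }
  // compare_exchange_strong installs new_value only if no other thread has
  // changed g_capacity since the load above (the Atomic::cmpxchg role).
  if (!g_capacity.compare_exchange_strong(old_value, new_value)) {
    return false;  // lost the race
  }
  if (new_cap != nullptr) { *new_cap = new_value; }
  if (old_cap != nullptr) { *old_cap = old_value; }
  return true;
}

int main() {
  size_t before = 0, after = 0;
  if (try_grow(kCommitAlign, &after, &before)) {
    std::printf("raised capacity from %zu to %zu\n", before, after);
  }
  return 0;
}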

// ...

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial metaspace size (MetaspaceSize).
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)(" used_after_gc: %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // The current high-water mark is below the minimum desired capacity,
    // so raise the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when expanding");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion; now see if we want to shrink.
  // We never want to shrink more than this.
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity is too large; compute the amount to shrink.
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to the initial size if
      // people call System.gc(), because some programs do that between
      // "phases" and then we'd just have to grow the metaspace again for
      // the next phase. So we damp the shrinking: 0% on the first call,
      // 10% on the second call, 40% on the third call, and 100% by the
      // fourth call. But if we recompute the size without shrinking, the
      // factor goes back to 0%.
      // Note: dividing before multiplying avoids overflow for very large
      // shrink_bytes values.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
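
// A worked example of the damping above. The sequence 0% -> 10% -> 40% ->
// 100% follows if each consecutive shrinking opportunity multiplies the
// factor by four, starting at 10 and capped at 100; the update rule below is
// a sketch consistent with that comment, not a quote of the elided code.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  unsigned shrink_factor = 0;              // reset when no shrink happens
  const size_t excess = 80 * 1024 * 1024;  // bytes above maximum_desired_capacity
  for (int call = 1; call <= 4; call++) {
    size_t shrink_bytes = excess / 100 * shrink_factor;  // damped shrink
    std::printf("call %d: factor %3u%% -> shrink %zu bytes\n",
                call, shrink_factor, shrink_bytes);
    shrink_factor = (shrink_factor == 0) ? 10 : std::min(shrink_factor * 4, 100u);
  }
  return 0;  // prints 0%, 10%, 40%, 100% of the excess over four calls
}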

// ...

    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if a thread fails to
  // increment the HWM, an allocation is still attempted: a failed increment
  // means another thread raised the HWM concurrently, so the allocation may
  // now succeed anyway.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}
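
// The loop above encodes an expand-or-piggyback protocol: a thread that
// loses the inc_capacity_until_GC() race retries the allocation anyway,
// because the winning thread has already raised the HWM for everyone. A
// minimal standalone model of the protocol follows; try_grow_once,
// try_allocate and the g_* globals are illustrative stand-ins, not HotSpot
// functions.

#include <atomic>
#include <cstddef>

static std::atomic<size_t> g_hwm{0};   // models _capacity_until_GC
static std::atomic<size_t> g_used{0};  // models committed metaspace

// One CAS attempt to raise the high-water mark.
static bool try_grow_once(size_t delta) {
  size_t old_value = g_hwm.load(std::memory_order_acquire);
  return g_hwm.compare_exchange_strong(old_value, old_value + delta);
}

// An allocation succeeds only while usage stays below the high-water mark.
static bool try_allocate(size_t bytes) {
  size_t used = g_used.fetch_add(bytes);
  if (used + bytes <= g_hwm.load(std::memory_order_acquire)) {
    return true;
  }
  g_used.fetch_sub(bytes);  // back out: this allocation would pass the HWM
  return false;
}

static bool expand_and_allocate_model(size_t bytes) {
  bool incremented;
  bool allocated;
  do {
    incremented = try_grow_once(bytes);  // succeeds for at most one thread...
    allocated = try_allocate(bytes);     // ...but everyone retries the allocation
  } while (!incremented && !allocated);
  return allocated;  // as in the real code, may fail even after a grow
}

int main() {
  return expand_and_allocate_model(1024) ? 0 : 1;
}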

size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
// ...

/*
 * --------------------------------------------------------------------
 * Updated version of the same code. Relative to the version above, the
 * metaspace high-water mark (_capacity_until_GC) is now capped at
 * MaxMetaspaceSize: inc_capacity_until_GC() asserts the cap,
 * compute_new_size() bounds the desired capacities by MaxMetaspaceSize
 * instead of max_uintx, and expand_and_allocate() returns NULL early
 * when the limit would be exceeded.
 * --------------------------------------------------------------------
 */

/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

// ...

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around; clamp new_value to the largest aligned value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  assert(new_value <= MaxMetaspaceSize,
         "new_value: " SIZE_FORMAT " > MaxMetaspaceSize: " SIZE_FORMAT,
         new_value, MaxMetaspaceSize);

  size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}
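
// Note how the new assert interacts with the wrap-around clamp above:
// align_down(max_uintx, commit_alignment) never exceeds max_uintx, and
// MaxMetaspaceSize defaults to max_uintx (effectively unlimited), so the
// overflow path satisfies the assert unless a smaller -XX:MaxMetaspaceSize
// was set, in which case callers are expected to check the limit before
// growing. A quick standalone check of the clamp arithmetic (kAlign is an
// illustrative commit alignment, not the HotSpot value):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t kAlign = 64 * 1024;
  const size_t clamped = SIZE_MAX - (SIZE_MAX % kAlign);  // align_down(SIZE_MAX, kAlign)
  assert(clamped % kAlign == 0);        // result is aligned
  assert(SIZE_MAX - clamped < kAlign);  // and is the largest such aligned value
  return 0;
}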

// ...

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink below the initial metaspace size (MetaspaceSize).
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)(" used_after_gc: %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // The current high-water mark is below the minimum desired capacity,
    // so raise the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when expanding");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion; now see if we want to shrink.
  // We never want to shrink more than this.
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity is too large; compute the amount to shrink.
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to the initial size if
      // people call System.gc(), because some programs do that between
      // "phases" and then we'd just have to grow the metaspace again for
      // the next phase. So we damp the shrinking: 0% on the first call,
      // 10% on the second call, 40% on the third call, and 100% by the
      // fourth call. But if we recompute the size without shrinking, the
      // factor goes back to 0%.
      // Note: dividing before multiplying avoids overflow for very large
      // shrink_bytes values.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
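
// Where the min/max desired capacities above come from: keeping at least
// r = MinMetaspaceFreeRatio/100 of the capacity free means
//   (capacity - used) / capacity >= r  <=>  capacity >= used / (1 - r),
// and keeping at most R = MaxMetaspaceFreeRatio/100 free means
//   capacity <= used / (1 - R).
// A worked example with the usual defaults (MinMetaspaceFreeRatio = 40,
// MaxMetaspaceFreeRatio = 70); the byte counts are illustrative only:

#include <cstdio>

int main() {
  const double used_after_gc = 60.0 * 1024 * 1024;              // 60 MB committed
  const double minimum_desired = used_after_gc / (1.0 - 0.40);  // 100 MB
  const double maximum_desired = used_after_gc / (1.0 - 0.70);  // 200 MB
  std::printf("keep capacity_until_GC within [%.0f MB, %.0f MB]\n",
              minimum_desired / (1024 * 1024), maximum_desired / (1024 * 1024));
  return 0;
}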

// ...

    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if a thread fails to
  // increment the HWM, an allocation is still attempted: a failed increment
  // means another thread raised the HWM concurrently, so the allocation may
  // now succeed anyway.
  do {
    // Do not try to raise the HWM past the hard MaxMetaspaceSize limit.
    if (MetaspaceGC::capacity_until_GC() + delta_bytes > MaxMetaspaceSize) {
      return NULL;
    }
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}
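
// Compared with the earlier version, the loop now refuses to raise the HWM
// past the hard MaxMetaspaceSize limit before even attempting the CAS, which
// is what keeps the new assert in inc_capacity_until_GC() satisfied. A
// sketch of that guard in the terms of the earlier standalone model
// (kMaxCapacity and guarded_grow are illustrative, not HotSpot identifiers);
// folding the limit check and the CAS onto the same loaded value avoids
// having to re-check after a lost race:

#include <atomic>
#include <cstddef>

static std::atomic<size_t> g_hwm2{0};                  // models _capacity_until_GC
static const size_t kMaxCapacity = 256 * 1024 * 1024;  // models MaxMetaspaceSize

// Fails without touching the HWM when the limit would be exceeded,
// mirroring the early NULL return in expand_and_allocate() above.
static bool guarded_grow(size_t delta) {
  size_t old_value = g_hwm2.load(std::memory_order_acquire);
  if (old_value + delta > kMaxCapacity) {
    return false;  // hard limit reached: the caller gives up (returns NULL)
  }
  return g_hwm2.compare_exchange_strong(old_value, old_value + delta);
}

int main() {
  return guarded_grow(64 * 1024) ? 0 : 1;  // grows 0 -> 64K, well under the cap
}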

size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
// ...