115 // allocation so make the delta greater than just enough
116 // for this allocation.
117 delta = max_delta;
118 } else {
119 // This allocation is large but the next ones are probably not
120 // so increase by the minimum.
121 delta = delta + min_delta;
122 }
123
124 assert_is_aligned(delta, Metaspace::commit_alignment());
125
126 return delta;
127 }
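// For context: the branch shown above is the tail of delta_capacity_until_GC(),
// which decides how much to raise the high-water mark for one allocation. A
// hedged sketch of the assumed selection logic (min_delta/max_delta are assumed
// to come from MinMetaspaceExpansion/MaxMetaspaceExpansion; this is an
// illustration, not a copy of the elided lines):
//
//   size_t delta = align_up(bytes, Metaspace::commit_alignment());
//   if (delta <= min_delta) {
//     delta = min_delta;           // small request: grow by the minimum step
//   } else if (delta <= max_delta) {
//     delta = max_delta;           // medium request: grow by the maximum step
//   } else {
//     delta = delta + min_delta;   // large request: grow by the request plus the minimum
//   }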
128
129 size_t MetaspaceGC::capacity_until_GC() {
130 size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
131 assert(value >= MetaspaceSize, "Not initialized properly?");
132 return value;
133 }
134
135 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
136 assert_is_aligned(v, Metaspace::commit_alignment());
137
138 size_t old_capacity_until_GC = _capacity_until_GC;
139 size_t new_value = old_capacity_until_GC + v;
140
141 if (new_value < old_capacity_until_GC) {
142 // The addition wrapped around, set new_value to aligned max value.
143 new_value = align_down(max_uintx, Metaspace::commit_alignment());
144 }
145
146 size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
147
148 if (old_capacity_until_GC != prev_value) {
149 return false;
150 }
151
152 if (new_cap_until_GC != NULL) {
153 *new_cap_until_GC = new_value;
154 }
155 if (old_cap_until_GC != NULL) {
156 *old_cap_until_GC = old_capacity_until_GC;
157 }
158 return true;
159 }
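// Usage sketch (hypothetical caller, not part of this file): inc_capacity_until_GC()
// uses a single compare-and-exchange, so it returns false whenever another thread
// updated _capacity_until_GC concurrently. A caller that must raise the HWM by
// `delta` can simply retry; the out-parameters are only written on success:
//
//   size_t new_cap = 0, old_cap = 0;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap)) {
//     // Lost a race with another thread; the HWM already moved, so re-read and retry.
//   }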
160
161 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
162 assert_is_aligned(v, Metaspace::commit_alignment());
163
164 return Atomic::sub(v, &_capacity_until_GC);
165 }
219 assert(_shrink_factor <= 100, "invalid shrink factor");
220 uint current_shrink_factor = _shrink_factor;
221 _shrink_factor = 0;
222
223 // Using committed_bytes() for used_after_gc is an overestimation, since the
224 // chunk free lists are included in committed_bytes() and the memory in an
225 // un-fragmented chunk free list is available for future allocations.
226 // However, if the chunk free lists become fragmented, then the memory may
227 // not be available for future allocations and the memory is therefore "in use".
228 // Including the chunk free lists in the definition of "in use" is therefore
229 // necessary. Not including the chunk free lists can cause capacity_until_GC to
230 // shrink below committed_bytes() and this has caused serious bugs in the past.
231 const size_t used_after_gc = MetaspaceUtils::committed_bytes();
232 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
233
234 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
235 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
236
237 const double min_tmp = used_after_gc / maximum_used_percentage;
238 size_t minimum_desired_capacity =
239 (size_t)MIN2(min_tmp, double(max_uintx));
240   // Don't shrink below the initial metaspace size (MetaspaceSize)
241 minimum_desired_capacity = MAX2(minimum_desired_capacity,
242 MetaspaceSize);
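  // Worked example of the expansion target (illustrative numbers only): with
  // MinMetaspaceFreeRatio = 40 the maximum_used_percentage is 0.60, so if
  // used_after_gc is 60 MB the minimum_desired_capacity becomes
  // 60 MB / 0.60 = 100 MB, i.e. enough headroom to keep at least 40% of the
  // committed metaspace free after the GC (but never below MetaspaceSize).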
243
244 log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
245 log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
246 minimum_free_percentage, maximum_used_percentage);
247 log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K);
248
249
250 size_t shrink_bytes = 0;
251 if (capacity_until_GC < minimum_desired_capacity) {
252   // If the capacity below the metaspace HWM is less than the minimum
253   // desired capacity, raise the HWM.
254 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
255 expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
256 // Don't expand unless it's significant
257 if (expand_bytes >= MinMetaspaceExpansion) {
258 size_t new_capacity_until_GC = 0;
259 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
266 minimum_desired_capacity / (double) K,
267 expand_bytes / (double) K,
268 MinMetaspaceExpansion / (double) K,
269 new_capacity_until_GC / (double) K);
270 }
271 return;
272 }
273
274 // No expansion, now see if we want to shrink
275 // We would never want to shrink more than this
276 assert(capacity_until_GC >= minimum_desired_capacity,
277 SIZE_FORMAT " >= " SIZE_FORMAT,
278 capacity_until_GC, minimum_desired_capacity);
279 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
280
281 // Should shrinking be considered?
282 if (MaxMetaspaceFreeRatio < 100) {
283 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
284 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
285 const double max_tmp = used_after_gc / minimum_used_percentage;
286 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
287 maximum_desired_capacity = MAX2(maximum_desired_capacity,
288 MetaspaceSize);
289 log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
290 maximum_free_percentage, minimum_used_percentage);
291 log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
292 minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
293
294 assert(minimum_desired_capacity <= maximum_desired_capacity,
295 "sanity check");
296
297 if (capacity_until_GC > maximum_desired_capacity) {
298 // Capacity too large, compute shrinking size
299 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
300     // We don't want to shrink all the way back to initSize if people call
301 // System.gc(), because some programs do that between "phases" and then
302 // we'd just have to grow the heap up again for the next phase. So we
303 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
304 // on the third call, and 100% by the fourth call. But if we recompute
305 // size without shrinking, it goes back to 0%.
306 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
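    // Worked example of the damping described in the comment above (illustrative
    // numbers only): if the excess over maximum_desired_capacity were 80 MB at
    // each call, consecutive compute_new_size() calls would shrink by 0, 8, 32
    // and 80 MB (factors 0, 10, 40 and 100), and never by more than the
    // max_shrink_bytes bound computed above.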
1453 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1454 Metaspace::assert_not_frozen();
1455
1456 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
1457
1458 // Don't use class_vsm() unless UseCompressedClassPointers is true.
1459 if (Metaspace::is_class_space_allocation(mdtype)) {
1460 return class_vsm()->allocate(word_size);
1461 } else {
1462 return vsm()->allocate(word_size);
1463 }
1464 }
1465
1466 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1467 Metaspace::assert_not_frozen();
1468 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
1469 assert(delta_bytes > 0, "Must be");
1470
1471 size_t before = 0;
1472 size_t after = 0;
1473 MetaWord* res;
1474 bool incremented;
1475
1476 // Each thread increments the HWM at most once. Even if the thread fails to increment
1477 // the HWM, an allocation is still attempted. This is because another thread must then
1478 // have incremented the HWM and therefore the allocation might still succeed.
1479 do {
1480 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
1481 res = allocate(word_size, mdtype);
1482 } while (!incremented && res == NULL);
1483
1484 if (incremented) {
1485 Metaspace::tracer()->report_gc_threshold(before, after,
1486 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
1487 log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
1488 }
1489
1490 return res;
1491 }
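// Walkthrough of the retry protocol above (illustrative): threads A and B both
// fail an allocation and call expand_and_allocate() concurrently. A wins the
// cmpxchg, sees incremented == true, allocates and reports the new threshold.
// B loses the race, sees incremented == false, but still attempts the allocation;
// if A's increment made room, B's allocate() succeeds and the loop exits,
// otherwise B retries the increment against the freshly read _capacity_until_GC.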
1492
1493 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
1494 return (vsm()->used_words() +
1495 (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
1496 }
1497
1498 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
1499 return (vsm()->capacity_words() +
1500 (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
1501 }
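// Note on the two accessors above (a reading of the names used here, not a
// guarantee from this file): allocated_blocks_bytes() sums used_words(), i.e.
// words actually handed out to callers, while allocated_chunks_bytes() sums
// capacity_words(), i.e. words held in chunks owned by this loader, so for the
// same ClassLoaderMetaspace the chunk figure is at least the block figure.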
1502
115 // allocation so make the delta greater than just enough
116 // for this allocation.
117 delta = max_delta;
118 } else {
119 // This allocation is large but the next ones are probably not
120 // so increase by the minimum.
121 delta = delta + min_delta;
122 }
123
124 assert_is_aligned(delta, Metaspace::commit_alignment());
125
126 return delta;
127 }
128
129 size_t MetaspaceGC::capacity_until_GC() {
130 size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
131 assert(value >= MetaspaceSize, "Not initialized properly?");
132 return value;
133 }
134
135 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
136 assert_is_aligned(v, Metaspace::commit_alignment());
137
138 size_t old_capacity_until_GC = _capacity_until_GC;
139 size_t new_value = old_capacity_until_GC + v;
140
141 if (new_value < old_capacity_until_GC) {
142 // The addition wrapped around, set new_value to aligned max value.
143 new_value = align_down(max_uintx, Metaspace::commit_alignment());
144 }
145
146 if (new_value > MaxMetaspaceSize) {
147 if (can_retry != NULL) {
148 *can_retry = false;
149 }
150 return false;
151 }
152
153 if (can_retry != NULL) {
154 *can_retry = true;
155 }
156 size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
157
158 if (old_capacity_until_GC != prev_value) {
159 return false;
160 }
161
162 if (new_cap_until_GC != NULL) {
163 *new_cap_until_GC = new_value;
164 }
165 if (old_cap_until_GC != NULL) {
166 *old_cap_until_GC = old_capacity_until_GC;
167 }
168 return true;
169 }
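// Usage sketch for the extended signature (hypothetical caller, not part of this
// file): a false return now has two meanings, distinguished through can_retry.
//
//   size_t new_cap = 0, old_cap = 0;
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap, &can_retry)) {
//     if (!can_retry) {
//       break;   // raising the HWM would exceed MaxMetaspaceSize; give up instead of spinning
//     }
//     // Otherwise we only lost a cmpxchg race; retry against the updated _capacity_until_GC.
//   }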
170
171 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
172 assert_is_aligned(v, Metaspace::commit_alignment());
173
174 return Atomic::sub(v, &_capacity_until_GC);
175 }
229 assert(_shrink_factor <= 100, "invalid shrink factor");
230 uint current_shrink_factor = _shrink_factor;
231 _shrink_factor = 0;
232
233 // Using committed_bytes() for used_after_gc is an overestimation, since the
234 // chunk free lists are included in committed_bytes() and the memory in an
235 // un-fragmented chunk free list is available for future allocations.
236 // However, if the chunk free lists become fragmented, then the memory may
237 // not be available for future allocations and the memory is therefore "in use".
238 // Including the chunk free lists in the definition of "in use" is therefore
239 // necessary. Not including the chunk free lists can cause capacity_until_GC to
240 // shrink below committed_bytes() and this has caused serious bugs in the past.
241 const size_t used_after_gc = MetaspaceUtils::committed_bytes();
242 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
243
244 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
245 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
246
247 const double min_tmp = used_after_gc / maximum_used_percentage;
248 size_t minimum_desired_capacity =
249 (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
250   // Don't shrink below the initial metaspace size (MetaspaceSize)
251 minimum_desired_capacity = MAX2(minimum_desired_capacity,
252 MetaspaceSize);
253
254 log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
255 log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
256 minimum_free_percentage, maximum_used_percentage);
257 log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K);
258
259
260 size_t shrink_bytes = 0;
261 if (capacity_until_GC < minimum_desired_capacity) {
262   // If the capacity below the metaspace HWM is less than the minimum
263   // desired capacity, raise the HWM.
264 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
265 expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
266 // Don't expand unless it's significant
267 if (expand_bytes >= MinMetaspaceExpansion) {
268 size_t new_capacity_until_GC = 0;
269 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
276 minimum_desired_capacity / (double) K,
277 expand_bytes / (double) K,
278 MinMetaspaceExpansion / (double) K,
279 new_capacity_until_GC / (double) K);
280 }
281 return;
282 }
283
284 // No expansion, now see if we want to shrink
285 // We would never want to shrink more than this
286 assert(capacity_until_GC >= minimum_desired_capacity,
287 SIZE_FORMAT " >= " SIZE_FORMAT,
288 capacity_until_GC, minimum_desired_capacity);
289 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
290
291 // Should shrinking be considered?
292 if (MaxMetaspaceFreeRatio < 100) {
293 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
294 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
295 const double max_tmp = used_after_gc / minimum_used_percentage;
296 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
297 maximum_desired_capacity = MAX2(maximum_desired_capacity,
298 MetaspaceSize);
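    // Worked example of the shrink threshold (illustrative numbers only): with
    // MaxMetaspaceFreeRatio = 70 the minimum_used_percentage is 0.30, so for
    // used_after_gc of 60 MB the maximum_desired_capacity is 60 MB / 0.30 = 200 MB;
    // shrinking is only considered once capacity_until_GC exceeds that, i.e. once
    // more than 70% of the capacity would otherwise sit free.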
299 log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
300 maximum_free_percentage, minimum_used_percentage);
301 log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
302 minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
303
304 assert(minimum_desired_capacity <= maximum_desired_capacity,
305 "sanity check");
306
307 if (capacity_until_GC > maximum_desired_capacity) {
308 // Capacity too large, compute shrinking size
309 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
310     // We don't want to shrink all the way back to initSize if people call
311 // System.gc(), because some programs do that between "phases" and then
312 // we'd just have to grow the heap up again for the next phase. So we
313 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
314 // on the third call, and 100% by the fourth call. But if we recompute
315 // size without shrinking, it goes back to 0%.
316 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1463 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1464 Metaspace::assert_not_frozen();
1465
1466 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
1467
1468 // Don't use class_vsm() unless UseCompressedClassPointers is true.
1469 if (Metaspace::is_class_space_allocation(mdtype)) {
1470 return class_vsm()->allocate(word_size);
1471 } else {
1472 return vsm()->allocate(word_size);
1473 }
1474 }
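// Routing sketch (hypothetical caller, not part of this file; assumes the
// MetadataType enumerators ClassType/NonClassType): mdtype selects the backing
// space, and the guard above suggests is_class_space_allocation() only picks the
// class space when compressed class pointers are in use.
//
//   MetaWord* p = loader_metaspace->allocate(word_size,
//       is_klass ? Metaspace::ClassType : Metaspace::NonClassType);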
1475
1476 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1477 Metaspace::assert_not_frozen();
1478 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
1479 assert(delta_bytes > 0, "Must be");
1480
1481 size_t before = 0;
1482 size_t after = 0;
1483 bool can_retry = true;
1484 MetaWord* res;
1485 bool incremented;
1486
1487 // Each thread increments the HWM at most once. Even if the thread fails to increment
1488 // the HWM, an allocation is still attempted. This is because another thread must then
1489 // have incremented the HWM and therefore the allocation might still succeed.
1490 do {
1491 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
1492 res = allocate(word_size, mdtype);
1493 } while (!incremented && res == NULL && can_retry);
1494
1495 if (incremented) {
1496 Metaspace::tracer()->report_gc_threshold(before, after,
1497 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
1498 log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
1499 }
1500
1501 return res;
1502 }
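// Walkthrough of the added termination condition (illustrative): when the HWM is
// already at the MaxMetaspaceSize-derived limit, inc_capacity_until_GC() returns
// false and sets can_retry to false, so if allocate() also returns NULL the loop
// exits after that pass and the NULL result is returned to the caller rather than
// retrying an expansion that can no longer succeed.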
1503
1504 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
1505 return (vsm()->used_words() +
1506 (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
1507 }
1508
1509 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
1510 return (vsm()->capacity_words() +
1511 (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
1512 }
1513