/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/vmError.hpp"


using namespace metaspace;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

static const char* space_type_name(Metaspace::MetaspaceType t) {
  const char* s = NULL;
  switch (t) {
    case Metaspace::StandardMetaspaceType: s = "Standard"; break;
    case Metaspace::BootMetaspaceType: s = "Boot"; break;
    case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break;
    case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
    default: ShouldNotReachHere();
  }
  return s;
}

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio,
// which some GCs use to resize the Java heap.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
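//
// Illustrative example (hypothetical flag values, not necessarily the
// defaults): with MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M,
// a 64K request yields a delta of 256K, a 1M request yields 4M, and a 6M
// request yields 6M + 256K, everything aligned to Metaspace::commit_alignment().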
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}
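
// A sketch of the retry pattern callers of inc_capacity_until_GC() can use
// (illustrative only, not an actual call site in this file):
//
//   size_t new_cap = 0;
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, NULL, &can_retry)) {
//     if (!can_retry) {
//       break;  // raising the limit would exceed MaxMetaspaceSize - give up
//     }
//     // Otherwise another thread won the CAS race; re-read and try again.
//   }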

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                                         (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
                                       (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
                                     " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
                                     left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
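      //
      // Illustrative progression (hypothetical numbers): if four consecutive
      // recomputations each find 100M of excess capacity, the damped shrink
      // amounts are 0M, 10M, 40M and then 100M, as _shrink_factor steps
      // 0 -> 10 -> 40 -> 100 below.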
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

// MetaspaceUtils
size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};

// Collect used metaspace statistics. This involves walking the CLDG. The resulting
// output will be the accumulated values for all live metaspaces.
// Note: method does not do any locking.
void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
  out->reset();
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      msp->add_to_statistics(out);
    }
  }
}

size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceUtils::free_in_vs_bytes() {
  return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
}

static void inc_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  (*pstat) += words;
}

static void dec_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  *pstat = size_now - words;
}

static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
  Atomic::add(pstat, words);
}

static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  Atomic::sub(pstat, words);
}

void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_overhead_words[mdtype], words);
}
void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_overhead_words[mdtype], words);
}

size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}

size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}

size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  if (chunk_manager == NULL) {
    return 0;
  }
  return chunk_manager->free_chunks_total_words();
}

size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total_words(mdtype) * BytesPerWord;
}

size_t MetaspaceUtils::free_chunks_total_words() {
  return free_chunks_total_words(Metaspace::ClassType) +
         free_chunks_total_words(Metaspace::NonClassType);
}

size_t MetaspaceUtils::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
  return Metaspace::get_chunk_manager(mdtype) != NULL;
}

MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
  if (!has_chunk_free_list(mdtype)) {
    return MetaspaceChunkFreeListSummary();
  }

  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
  return cm->chunk_free_list_summary();
}

void MetaspaceUtils::print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values) {
  const metaspace::MetaspaceSizesSnapshot meta_values;

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

void MetaspaceUtils::print_on(outputStream* out) {
  out->print_cr(" Metaspace "
                "used " SIZE_FORMAT "K, "
                "capacity " SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved " SIZE_FORMAT "K",
                used_bytes()/K,
                capacity_bytes()/K,
                committed_bytes()/K,
                reserved_bytes()/K);

  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr(" class space "
                  "used " SIZE_FORMAT "K, "
                  "capacity " SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved " SIZE_FORMAT "K",
                  used_bytes(ct)/K,
                  capacity_bytes(ct)/K,
                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}


void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  if (Metaspace::using_class_space()) {
    out->print(" Non-class space: ");
  }
  print_scaled_words(out, reserved_nonclass_words, scale, 7);
  out->print(" reserved, ");
  print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
  out->print_cr(" committed ");

  if (Metaspace::using_class_space()) {
    const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
    const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
    out->print(" Class space: ");
    print_scaled_words(out, reserved_class_words, scale, 7);
    out->print(" reserved, ");
    print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
    out->print_cr(" committed ");

    const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
    const size_t committed_words = committed_nonclass_words + committed_class_words;
    out->print(" Both: ");
    print_scaled_words(out, reserved_words, scale, 7);
    out->print(" reserved, ");
    print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
    out->print_cr(" committed ");
  }
}

static void print_basic_switches(outputStream* out, size_t scale) {
  out->print("MaxMetaspaceSize: ");
  if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) {
    // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real
    // value is smaller.
    out->print("unlimited");
  } else {
    print_human_readable_size(out, MaxMetaspaceSize, scale);
  }
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("CompressedClassSpaceSize: ");
    print_human_readable_size(out, CompressedClassSpaceSize, scale);
    out->cr();
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {

  if (!Metaspace::initialized()) {
    out->print_cr("Metaspace not yet initialized.");
    return;
  }

  out->cr();
  out->print_cr("Usage:");

  if (Metaspace::using_class_space()) {
    out->print(" Non-class: ");
  }

  // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
  // MetaspaceUtils.
  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;

  print_scaled_words(out, cap_nc, scale, 5);
  out->print(" capacity, ");
  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
  out->print(" used, ");
  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
  out->print(" free+waste, ");
  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
  out->print(" overhead. ");
  out->cr();

  if (Metaspace::using_class_space()) {
    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
    out->print(" Class: ");
    print_scaled_words(out, cap_c, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
    out->print(" overhead. ");
    out->cr();

    out->print(" Both: ");
    const size_t cap = cap_nc + cap_c;

    print_scaled_words(out, cap, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
    out->print(" overhead. ");
    out->cr();
  }

  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  out->cr();
  out->print_cr("Chunk freelists:");

  if (Metaspace::using_class_space()) {
    out->print(" Non-Class: ");
  }
  print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print(" Class: ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes(), scale);
    out->cr();
    out->print(" Both: ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes() +
                                   Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
    out->cr();
  }

  out->cr();

  // Print basic settings
  print_basic_switches(out, scale);

  out->cr();
}

void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {

  if (!Metaspace::initialized()) {
    out->print_cr("Metaspace not yet initialized.");
    return;
  }

  const bool print_loaders = (flags & rf_show_loaders) > 0;
  const bool print_classes = (flags & rf_show_classes) > 0;
  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;

  // Some report options require walking the class loader data graph.
  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
  if (print_loaders) {
    out->cr();
    out->print_cr("Usage per loader:");
    out->cr();
  }

  ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print

  // Print totals, broken up by space type.
  if (print_by_spacetype) {
    out->cr();
    out->print_cr("Usage per space type:");
    out->cr();
    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
         space_type < (int)Metaspace::MetaspaceTypeCount; space_type++)
    {
      uintx num_loaders = cl._num_loaders_by_spacetype[space_type];
      uintx num_classes = cl._num_classes_by_spacetype[space_type];
      out->print("%s - " UINTX_FORMAT " %s",
                 space_type_name((Metaspace::MetaspaceType)space_type),
                 num_loaders, loaders_plural(num_loaders));
      if (num_classes > 0) {
        out->print(", ");
        print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]);
        out->print(":");
        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
      } else {
        out->print(".");
        out->cr();
      }
      out->cr();
    }
  }

  // Print totals for in-use data:
  out->cr();
  {
    uintx num_loaders = cl._num_loaders;
    out->print("Total Usage - " UINTX_FORMAT " %s, ",
               num_loaders, loaders_plural(num_loaders));
    print_number_of_classes(out, cl._num_classes, cl._num_classes_shared);
    out->print(":");
    cl._stats_total.print_on(out, scale, print_by_chunktype);
    out->cr();
  }

  // -- Print Virtual space.
  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  // -- Print VirtualSpaceList details.
  if ((flags & rf_show_vslist) > 0) {
    out->cr();
    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");

    if (Metaspace::using_class_space()) {
      out->print_cr(" Non-Class:");
    }
    Metaspace::space_list()->print_on(out, scale);
    if (Metaspace::using_class_space()) {
      out->print_cr(" Class:");
      Metaspace::class_space_list()->print_on(out, scale);
    }
  }
  out->cr();

  // -- Print VirtualSpaceList map.
  if ((flags & rf_show_vsmap) > 0) {
    out->cr();
    out->print_cr("Virtual space map:");

    if (Metaspace::using_class_space()) {
      out->print_cr(" Non-Class:");
    }
    Metaspace::space_list()->print_map(out);
    if (Metaspace::using_class_space()) {
      out->print_cr(" Class:");
      Metaspace::class_space_list()->print_map(out);
    }
  }
  out->cr();

  // -- Print Freelists (ChunkManager) details
  out->cr();
  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");

  ChunkManagerStatistics non_class_cm_stat;
  Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);

  if (Metaspace::using_class_space()) {
    out->print_cr(" Non-Class:");
  }
  non_class_cm_stat.print_on(out, scale);

  if (Metaspace::using_class_space()) {
    ChunkManagerStatistics class_cm_stat;
    Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
    out->print_cr(" Class:");
    class_cm_stat.print_on(out, scale);
  }

  // As a convenience, print a summary of common waste.
  out->cr();
  out->print("Waste ");
  // For all waste categories, print the percentage of the total committed metaspace size.
  const size_t committed_words = committed_bytes() / BytesPerWord;

  out->print("(percentages refer to total committed size ");
  print_scaled_words(out, committed_words, scale);
  out->print_cr("):");

  // Print space committed but not yet used by any class loader
  const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
  out->print(" Committed unused: ");
  print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
  out->cr();

  // Print waste for in-use chunks.
  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
  UsedChunksStatistics ucs_all;
  ucs_all.add(ucs_nonclass);
  ucs_all.add(ucs_class);

  out->print(" Waste in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
  out->cr();
  out->print(" Free in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
  out->cr();
  out->print(" Overhead in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
  out->cr();

  // Print waste in free chunks.
  const size_t total_capacity_in_free_chunks =
    Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
    (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
  out->print(" In free chunks: ");
  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
  out->cr();

  // Print waste in deallocated blocks.
  const uintx free_blocks_num =
    cl._stats_total.nonclass_sm_stats().free_blocks_num() +
    cl._stats_total.class_sm_stats().free_blocks_num();
  const size_t free_blocks_cap_words =
    cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
    cl._stats_total.class_sm_stats().free_blocks_cap_words();
  out->print("Deallocated from chunks in use: ");
  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
  out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
  out->cr();

  // Print total waste.
  const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
                             + free_blocks_cap_words + unused_words_in_vs;
  out->print(" -total-: ");
  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
  out->cr();

  // Print internal statistics
#ifdef ASSERT
  out->cr();
  out->cr();
  out->print_cr("Internal statistics:");
  out->cr();
  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
  out->print_cr("Number of chunks added to freelist: " UINTX_FORMAT ".",
                g_internal_statistics.num_chunks_added_to_freelist);
  out->print_cr("Number of chunks removed from freelist: " UINTX_FORMAT ".",
                g_internal_statistics.num_chunks_removed_from_freelist);
  out->print_cr("Number of chunk merges: " UINTX_FORMAT ", split-ups: " UINTX_FORMAT ".",
                g_internal_statistics.num_chunk_merges, g_internal_statistics.num_chunk_splits);

  out->cr();
#endif

  // Print some interesting settings
  out->cr();
  out->cr();
  print_basic_switches(out, scale);

  out->cr();
  out->print("InitialBootClassLoaderMetaspaceSize: ");
  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);

  out->cr();
  out->cr();

} // MetaspaceUtils::print_report()

// Prints an ASCII representation of the given space.
void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  const bool for_class = (mdtype == Metaspace::ClassType);
  VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
  if (vsl != NULL) {
    if (for_class) {
      if (!Metaspace::using_class_space()) {
        out->print_cr("No Class Space.");
        return;
      }
      out->print_raw("---- Metaspace Map (Class Space) ----");
    } else {
      out->print_raw("---- Metaspace Map (Non-Class Space) ----");
    }
    // Print legend:
    out->cr();
    out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
    out->cr();
    vsl->print_map(out);
    out->cr();
  }
}

void MetaspaceUtils::verify_free_chunks() {
#ifdef ASSERT
  Metaspace::chunk_manager_metadata()->verify(false);
  if (Metaspace::using_class_space()) {
    Metaspace::chunk_manager_class()->verify(false);
  }
#endif
}

void MetaspaceUtils::verify_metrics() {
#ifdef ASSERT
  // Please note: there are time windows where the internal counters are out of sync with
  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
  // not be counted when iterating the CLDG. So be careful when you call this method.
  ClassLoaderMetaspaceStatistics total_stat;
  collect_statistics(&total_stat);
  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();

  bool mismatch = false;
  for (int i = 0; i < Metaspace::MetadataTypeCount; i++) {
    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
    if (capacity_words(mdtype) != chunk_stat.cap() ||
        used_words(mdtype) != chunk_stat.used() ||
        overhead_words(mdtype) != chunk_stat.overhead()) {
      mismatch = true;
      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
      tty->flush();
    }
  }
  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
#endif
}

// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

bool Metaspace::_initialized = false;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, address cds_base) {
  assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
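  //
  // For orientation (a sketch, not the authoritative definition - see
  // CompressedKlassPointers): a narrow klass id is decoded roughly as
  //   Klass* k = (Klass*)(base + ((uintptr_t)narrow_id << shift));
  // so base and shift together must be able to reach every address in
  // [lower_base, higher_address).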
  address lower_base = (address)metaspace_rs.base();
  address higher_address = (address)metaspace_rs.end();
  if (cds_base != NULL) {
    assert(UseSharedSpaces, "must be");
    lower_base = MIN2(lower_base, cds_base);
  } else {
    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
    // If compressed class space fits in lower 32G, we don't need a base.
    if (higher_address <= (address)klass_encoding_max) {
      lower_base = 0; // Effectively lower base is zero.
    }
  }

  CompressedKlassPointers::set_base(lower_base);

  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift.  See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how the dump time narrow_klass_shift is set.  Although CDS can also work
  // in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass shift
  // to be consistent with AOT, so archived java heap objects can be used at
  // the same time as AOT code.
  if (!UseSharedSpaces
      && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    CompressedKlassPointers::set_shift(0);
  } else {
    CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
  }
  AOTLoader::set_narrow_klass_shift();
}

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base) {
  assert(!DumpSharedSpaces, "compressed klass space is allocated by MetaspaceShared class.");
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
  assert_is_aligned(requested_addr, _reserve_alignment);
  assert_is_aligned(cds_base, _reserve_alignment);
  assert_is_aligned(compressed_class_space_size(), _reserve_alignment);

  // Don't use large pages for the class space.
  bool large_pages = false;

  if (metaspace_rs.is_reserved()) {
    // CDS should have already reserved the space.
    assert(requested_addr == NULL, "not used");
    assert(cds_base != NULL, "CDS should have already reserved the memory space");
  } else {
    assert(cds_base == NULL, "must be");
#if !(defined(AARCH64) || defined(AIX))
    metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment,
                                 large_pages, requested_addr);
#else // AARCH64 || AIX
    // Our compressed klass pointers may fit nicely into the lower 32
    // bits.
    if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment,
                                   large_pages,
                                   requested_addr);
    }

    if (!metaspace_rs.is_reserved()) {
      // Aarch64: Try to align metaspace so that we can decode a compressed
      // klass with a single MOVK instruction.  We can do this iff the
      // compressed class base is a multiple of 4G.
      // Aix: Search for a place where we can find memory.  If we need to load
      // the base, 4G alignment is helpful, too.
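      // (Background, a rough sketch of the AArch64 encoding: a base that is a
      // multiple of 4G and below 2^48 has all of its set bits in positions
      // 32..47, so it can be materialized with a single
      // "movk xN, #imm16, lsl #32" when decoding a narrow klass.)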
      size_t increment = AARCH64_ONLY(4*)G;
      for (char *a = align_up(requested_addr, increment);
           a < (char*)(1024*G);
           a += increment) {
        if (a == (char *)(32*G)) {
          // Go faster from here on. Zero-based is no longer possible.
          increment = 4*G;
        }

        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment,
                                     large_pages,
                                     a);
        if (metaspace_rs.is_reserved()) {
          break;
        }
      }
    }
#endif // AARCH64 || AIX
  }

  if (!metaspace_rs.is_reserved()) {
    assert(cds_base == NULL, "CDS should have already reserved the memory space");
    // If no successful allocation then try to allocate the space anywhere.  If
    // that fails then OOM doom.  At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                 _reserve_alignment, large_pages);
    if (!metaspace_rs.is_reserved()) {
      vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                            compressed_class_space_size()));
    }
  }

  if (cds_base == NULL) {
    // If we got here then the metaspace got allocated.
    MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
  }

  set_narrow_klass_base_and_shift(metaspace_rs, cds_base);

  initialize_class_space(metaspace_rs);

  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    print_compressed_class_space(&ls, requested_addr);
  }
}

void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
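//
// Rough picture of the address space when this applies (illustrative, not to
// scale):
//
//   +--------------------+------------------------------+
//   |      Java heap     |    compressed class space    |
//   +--------------------+------------------------------+
//                        ^ rs.base(), rs.size() >= CompressedClassSpaceSize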
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(true /*is_class*/);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif // _LP64

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would lose
  // the information of whether MaxMetaspaceSize was set on the command line.
  // This information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);

  // Initial virtual space size will be calculated at global_initialize()
  size_t min_metaspace_sz =
      VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
  if (UseCompressedClassPointers) {
    if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
      if (min_metaspace_sz >= MaxMetaspaceSize) {
        vm_exit_during_initialization("MaxMetaspaceSize is too small.");
      } else {
        FLAG_SET_ERGO(CompressedClassSpaceSize,
                      MaxMetaspaceSize - min_metaspace_sz);
      }
    }
  } else if (min_metaspace_sz >= MaxMetaspaceSize) {
    FLAG_SET_ERGO(InitialBootClassLoaderMetaspaceSize,
                  min_metaspace_sz);
  }

  set_compressed_class_space_size(CompressedClassSpaceSize);
}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

  bool class_space_inited = false;
#if INCLUDE_CDS
  if (DumpSharedSpaces) {
    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
    class_space_inited = true;
  } else if (UseSharedSpaces) {
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false.
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    class_space_inited = UseSharedSpaces;
  }

  if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
    vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
  }
#endif // INCLUDE_CDS

#ifdef _LP64
  if (using_class_space() && !class_space_inited) {
    char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
    ReservedSpace dummy;
    allocate_metaspace_compressed_klass_ptrs(dummy, base, 0);
  }
#endif

  // Initialize these before initializing the VirtualSpaceList
  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list.  The next chunk will be small and progress
  // from there.  This size was determined experimentally (by running -version).
  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                      (CompressedClassSpaceSize/BytesPerWord)*2);
  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  // Arbitrarily set the initial virtual space to a multiple
  // of the boot class loader size.
  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  word_size = align_up(word_size, Metaspace::reserve_alignment_words());

  // Initialize the list of virtual spaces.
  _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false /*is_class*/);

  if (!_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  }

  _tracer = new MetaspaceTracer();

  _initialized = true;
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::verify_global_initialization() {
  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");

  if (using_class_space()) {
    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
  }
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}
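
// (Worked example, assuming a 64-bit VM and a 4K allocation alignment:
// align_word_size_up(100) converts 100 words to 800 bytes, rounds up to
// 4096 bytes, and returns 512 words.)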

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {
  assert(!_frozen, "sanity");
  assert(!(DumpSharedSpaces && THREAD->is_VM_thread()), "sanity");

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    if (DumpSharedSpaces) {
      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
      // We should abort to avoid generating a potentially bad archive.
      vm_exit_during_cds_dumping(err_msg("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
                                         MetaspaceObj::type_name(type), word_size * BytesPerWord),
                                 err_msg("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize));
    }
    report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
    assert(HAS_PENDING_EXCEPTION, "sanity");
    return NULL;
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != NULL) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLocker cl(MetaspaceExpand_lock,
                 Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}
1406
// ClassLoaderMetaspace

ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
  : _space_type(type)
  , _lock(lock)
  , _vsm(NULL)
  , _class_vsm(NULL)
{
  initialize(lock, type);
}

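// Tearing down a loader's metaspace deletes its space managers, which hand
// their chunks back to the global freelists for reuse.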
ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  Metaspace::assert_not_frozen();
  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
  delete _vsm;
  if (Metaspace::using_class_space()) {
    delete _class_vsm;
  }
}

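// Fetch an initial chunk for the given space manager and install it as the
// current chunk. If no chunk could be obtained, the manager simply starts
// out empty.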
void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist
  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

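  // The freelist had no fitting chunk; carve a new one out of the virtual space list.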
  if (chunk == NULL) {
    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                  get_space_manager(mdtype)->medium_chunk_bunch());
  }

  return chunk;
}

void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}

MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));

  // Don't use class_vsm() unless UseCompressedClassPointers is true.
  if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

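// Slow path: try to raise the GC high-water mark (capacity_until_GC) by enough
// to fit word_size, then retry the allocation.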
MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  bool can_retry = true;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL && can_retry);

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}

size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
      (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
      (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
}

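// Hand a no-longer-used block back to the owning space manager so it can be
// reused by a later allocation.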
void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  Metaspace::assert_not_frozen();
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));

  MutexLocker ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && Metaspace::using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}

size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
  assert(Metaspace::using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void ClassLoaderMetaspace::print_on(outputStream* out) const {
  // In verbose mode, print both the non-class and, if used, the class space manager.
  if (Verbose) {
    vsm()->print_on(out);
    if (Metaspace::using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

void ClassLoaderMetaspace::verify() {
  vsm()->verify();
  if (Metaspace::using_class_space()) {
    class_vsm()->verify();
  }
}

void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
  assert_lock_strong(lock());
  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
  if (Metaspace::using_class_space()) {
    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
  }
}

void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

/////////////// Unit tests ///////////////

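// The hooks below have external linkage so that test code (presumably
// gtest-based whitebox tests; the callers live outside this file) can probe
// metaspace internals without including the internal headers.
//
// A minimal, hypothetical caller sketch:
//
//   chunkmanager_statistics_t stats;
//   test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stats);
//   // e.g. check that the freelist holds no humongous chunks:
//   // assert(stats.num_humongous_chunks == 0, "unexpected humongous chunks");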
struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};

extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManagerStatistics stat;
  chunk_manager->collect_statistics(&stat);
  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
}

struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};

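// Test hook: report the fixed chunk word sizes (specialized / small / medium)
// used for the given metadata type.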
extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
  if (mdType == Metaspace::NonClassType) {
    out->specialized_chunk_word_size = SpecializedChunk;
    out->small_chunk_word_size = SmallChunk;
    out->medium_chunk_word_size = MediumChunk;
  } else {
    out->specialized_chunk_word_size = ClassSpecializedChunk;
    out->small_chunk_word_size = ClassSmallChunk;
    out->medium_chunk_word_size = ClassMediumChunk;
  }
}