/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/universe.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"


using namespace metaspace;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

static const char* space_type_name(Metaspace::MetaspaceType t) {
  const char* s = NULL;
  switch (t) {
    case Metaspace::StandardMetaspaceType: s = "Standard"; break;
    case Metaspace::BootMetaspaceType: s = "Boot"; break;
    case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break;
    case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
    default: ShouldNotReachHere();
  }
  return s;
}

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// BlockFreelist methods

// VirtualSpaceNode methods

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
// metaspace policy uses those as the small and large steps for the HWM.
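//
// For illustration only (the real steps are whatever the flags are set to):
// assuming the typical defaults MinMetaspaceExpansion=256K and
// MaxMetaspaceExpansion=4M, a failed 10K allocation raises the HWM by 256K,
// a failed 1M allocation raises it by 4M, and a failed 8M allocation raises
// it by 8M plus 256K. See delta_capacity_until_GC() below.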
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
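  // (Note: word_size is in words; the comparisons below convert it to bytes
  // via BytesPerWord, while the trace messages print sizes in words.)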
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
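  // Illustrative numbers (assuming MinMetaspaceFreeRatio=40, its usual
  // default): with 600K committed after GC, maximum_used_percentage is 0.60
  // and minimum_desired_capacity becomes 600K / 0.60 = 1000K, i.e. the HWM is
  // kept high enough that at least 40% of the capacity would remain free.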
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
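  // (Setting MaxMetaspaceFreeRatio to 100 effectively disables shrinking.
  // Illustrative numbers, assuming MaxMetaspaceFreeRatio=70, its usual
  // default: with 600K used, minimum_used_percentage is 0.30, so
  // maximum_desired_capacity becomes 600K / 0.30 = 2000K; only capacity
  // above that is a candidate for removal.)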
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

// MetaspaceUtils
size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};

// Collect used metaspace statistics. This involves walking the CLDG. The resulting
// output will be the accumulated values for all live metaspaces.
// Note: method does not do any locking.
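// Callers that need a consistent snapshot must therefore provide for it
// themselves, e.g. by calling at a safepoint.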
void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
  out->reset();
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      msp->add_to_statistics(out);
    }
  }
}

size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceUtils::free_in_vs_bytes() {
  return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
}

static void inc_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  (*pstat) += words;
}

static void dec_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  *pstat = size_now - words;
}

static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
  Atomic::add(words, pstat);
}

static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  Atomic::sub(words, pstat);
}

void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_overhead_words[mdtype], words);
}
void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_overhead_words[mdtype], words);
}

size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}

size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}

size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  if (chunk_manager == NULL) {
    return 0;
  }
  chunk_manager->slow_verify();
  return chunk_manager->free_chunks_total_words();
}

size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total_words(mdtype) * BytesPerWord;
}

size_t MetaspaceUtils::free_chunks_total_words() {
  return free_chunks_total_words(Metaspace::ClassType) +
         free_chunks_total_words(Metaspace::NonClassType);
}

size_t MetaspaceUtils::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
  return Metaspace::get_chunk_manager(mdtype) != NULL;
}

MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
  if (!has_chunk_free_list(mdtype)) {
    return MetaspaceChunkFreeListSummary();
  }

  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
  return cm->chunk_free_list_summary();
}

void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
  log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                          prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
}

void MetaspaceUtils::print_on(outputStream* out) {
  Metaspace::MetadataType nct = Metaspace::NonClassType;

  out->print_cr(" Metaspace       "
                "used "      SIZE_FORMAT "K, "
                "capacity "  SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved "  SIZE_FORMAT "K",
                used_bytes()/K,
                capacity_bytes()/K,
                committed_bytes()/K,
                reserved_bytes()/K);

  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
                  "used "      SIZE_FORMAT "K, "
                  "capacity "  SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved "  SIZE_FORMAT "K",
                  used_bytes(ct)/K,
                  capacity_bytes(ct)/K,
                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}


void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  {
    if (Metaspace::using_class_space()) {
      out->print("  Non-class space:  ");
    }
    print_scaled_words(out, reserved_nonclass_words, scale, 7);
    out->print(" reserved, ");
    print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
    out->print_cr(" committed ");

    if (Metaspace::using_class_space()) {
      const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      out->print("      Class space:  ");
      print_scaled_words(out, reserved_class_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
      out->print_cr(" committed ");

      const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
      const size_t committed_words = committed_nonclass_words + committed_class_words;
      out->print("             Both:  ");
      print_scaled_words(out, reserved_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
      out->print_cr(" committed ");
    }
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {

  out->cr();
  out->print_cr("Usage:");

  if (Metaspace::using_class_space()) {
    out->print("  Non-class:  ");
  }

  // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
  // MetaspaceUtils.
  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;

  print_scaled_words(out, cap_nc, scale, 5);
  out->print(" capacity, ");
  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
  out->print(" used, ");
  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
  out->print(" free+waste, ");
  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
  out->print(" overhead. ");
  out->cr();

  if (Metaspace::using_class_space()) {
    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
    out->print("      Class:  ");
    print_scaled_words(out, cap_c, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
    out->print(" overhead. ");
    out->cr();

    out->print("       Both:  ");
    const size_t cap = cap_nc + cap_c;

    print_scaled_words(out, cap, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
    out->print(" overhead. ");
    out->cr();
  }

  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  out->cr();
  out->print_cr("Chunk freelists:");

  if (Metaspace::using_class_space()) {
    out->print("   Non-Class:  ");
  }
  print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("       Class:  ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
    out->cr();
    out->print("        Both:  ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words() +
                                   Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
    out->cr();
  }
  out->cr();

}

void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {

  const bool print_loaders = (flags & rf_show_loaders) > 0;
  const bool print_classes = (flags & rf_show_classes) > 0;
  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;

  // Some report options require walking the class loader data graph.
  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
  if (print_loaders) {
    out->cr();
    out->print_cr("Usage per loader:");
    out->cr();
  }

  ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print

  // Print totals, broken up by space type.
  if (print_by_spacetype) {
    out->cr();
    out->print_cr("Usage per space type:");
    out->cr();
    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
         space_type < (int)Metaspace::MetaspaceTypeCount; space_type++)
    {
      uintx num = cl._num_loaders_by_spacetype[space_type];
      out->print("%s (" UINTX_FORMAT " loader%s)%c",
                 space_type_name((Metaspace::MetaspaceType)space_type),
                 num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
      if (num > 0) {
        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
      }
      out->cr();
    }
  }

  // Print totals for in-use data:
  out->cr();
  out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
                cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));

  cl._stats_total.print_on(out, scale, print_by_chunktype);

  // -- Print Virtual space.
  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  // -- Print VirtualSpaceList details.
  if ((flags & rf_show_vslist) > 0) {
    out->cr();
    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");

    if (Metaspace::using_class_space()) {
      out->print_cr("   Non-Class:");
    }
    Metaspace::space_list()->print_on(out, scale);
    if (Metaspace::using_class_space()) {
      out->print_cr("       Class:");
      Metaspace::class_space_list()->print_on(out, scale);
    }
  }
  out->cr();

  // -- Print VirtualSpaceList map.
  if ((flags & rf_show_vsmap) > 0) {
    out->cr();
    out->print_cr("Virtual space map:");

    if (Metaspace::using_class_space()) {
      out->print_cr("   Non-Class:");
    }
    Metaspace::space_list()->print_map(out);
    if (Metaspace::using_class_space()) {
      out->print_cr("       Class:");
      Metaspace::class_space_list()->print_map(out);
    }
  }
  out->cr();

  // -- Print Freelists (ChunkManager) details
  out->cr();
  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");

  ChunkManagerStatistics non_class_cm_stat;
  Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);

  if (Metaspace::using_class_space()) {
    out->print_cr("   Non-Class:");
  }
  non_class_cm_stat.print_on(out, scale);

  if (Metaspace::using_class_space()) {
    ChunkManagerStatistics class_cm_stat;
    Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
    out->print_cr("       Class:");
    class_cm_stat.print_on(out, scale);
  }

  // As a convenience, print a summary of common waste.
  out->cr();
  out->print("Waste ");
  // For all wastages, print percentages based on the total size of memory
  // committed for metaspace.
  const size_t committed_words = committed_bytes() / BytesPerWord;

  out->print("(percentages refer to total committed size ");
  print_scaled_words(out, committed_words, scale);
  out->print_cr("):");

  // Print space committed but not yet used by any class loader
  const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
  out->print("              Committed unused: ");
  print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
  out->cr();

  // Print waste for in-use chunks.
  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
  UsedChunksStatistics ucs_all;
  ucs_all.add(ucs_nonclass);
  ucs_all.add(ucs_class);

  out->print("        Waste in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
  out->cr();
  out->print("         Free in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
  out->cr();
  out->print("     Overhead in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
  out->cr();

  // Print waste in free chunks.
  const size_t total_capacity_in_free_chunks =
      Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
      (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
  out->print("                In free chunks: ");
  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
  out->cr();

  // Print waste in deallocated blocks.
  const uintx free_blocks_num =
      cl._stats_total.nonclass_sm_stats().free_blocks_num() +
      cl._stats_total.class_sm_stats().free_blocks_num();
  const size_t free_blocks_cap_words =
      cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
      cl._stats_total.class_sm_stats().free_blocks_cap_words();
  out->print("Deallocated from chunks in use: ");
  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
  out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
  out->cr();

  // Print total waste.
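  // ("total" below is the sum of the six numbers printed above: committed
  // but unused space, waste/free/overhead in in-use chunks, the capacity
  // held in the chunk freelists, and the capacity of deallocated blocks.)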
  const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
      + free_blocks_cap_words + unused_words_in_vs;
  out->print("                       -total-: ");
  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
  out->cr();

  // Print internal statistics
#ifdef ASSERT
  out->cr();
  out->cr();
  out->print_cr("Internal statistics:");
  out->cr();
  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
  out->cr();
#endif

  // Print some interesting settings
  out->cr();
  out->cr();
  out->print("MaxMetaspaceSize: ");
  print_human_readable_size(out, MaxMetaspaceSize, scale);
  out->cr();
  out->print("InitialBootClassLoaderMetaspaceSize: ");
  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
  out->cr();

  out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("CompressedClassSpaceSize: ");
    print_human_readable_size(out, CompressedClassSpaceSize, scale);
  }

  out->cr();
  out->cr();

} // MetaspaceUtils::print_report()

// Prints an ASCII representation of the given space.
void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  const bool for_class = (mdtype == Metaspace::ClassType);
  VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
  if (vsl != NULL) {
    if (for_class) {
      if (!Metaspace::using_class_space()) {
        out->print_cr("No Class Space.");
        return;
      }
      out->print_raw("---- Metaspace Map (Class Space) ----");
    } else {
      out->print_raw("---- Metaspace Map (Non-Class Space) ----");
    }
    // Print legend:
    out->cr();
    out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
    out->cr();
    vsl->print_map(out);
    out->cr();
  }
}

void MetaspaceUtils::verify_free_chunks() {
  Metaspace::chunk_manager_metadata()->verify();
  if (Metaspace::using_class_space()) {
    Metaspace::chunk_manager_class()->verify();
  }
}

void MetaspaceUtils::verify_metrics() {
#ifdef ASSERT
  // Please note: there are time windows where the internal counters are out of sync with
  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
  // not be counted when iterating the CLDG. So be careful when you call this method.
  ClassLoaderMetaspaceStatistics total_stat;
  collect_statistics(&total_stat);
  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();

  bool mismatch = false;
  for (int i = 0; i < Metaspace::MetadataTypeCount; i++) {
    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
    if (capacity_words(mdtype) != chunk_stat.cap() ||
        used_words(mdtype) != chunk_stat.used() ||
        overhead_words(mdtype) != chunk_stat.overhead()) {
      mismatch = true;
      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
      tty->flush();
    }
  }
  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
#endif
}

// Utils to check if a pointer or range is part of a committed metaspace region.
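// Note: find_enclosing_virtual_space() takes MetaspaceExpand_lock internally,
// and is_range_in_committed() additionally consults the mapped CDS regions
// when UseSharedSpaces is on.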
metaspace::VirtualSpaceNode* MetaspaceUtils::find_enclosing_virtual_space(const void* p) {
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* vsn = Metaspace::space_list()->find_enclosing_space(p);
  if (Metaspace::using_class_space() && vsn == NULL) {
    vsn = Metaspace::class_space_list()->find_enclosing_space(p);
  }
  return vsn;
}

bool MetaspaceUtils::is_range_in_committed(const void* from, const void* to) {
#if INCLUDE_CDS
  if (UseSharedSpaces) {
    for (int idx = MetaspaceShared::ro; idx <= MetaspaceShared::mc; idx++) {
      if (FileMapInfo::current_info()->is_in_shared_region(from, idx)) {
        return FileMapInfo::current_info()->is_in_shared_region(to, idx);
      }
    }
  }
#endif
  VirtualSpaceNode* vsn = find_enclosing_virtual_space(from);
  return (vsn != NULL) && vsn->contains(to);
}


// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
  // Figure out the narrow_klass_base and the narrow_klass_shift. The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled). The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
  address higher_address;
#if INCLUDE_CDS
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                          (address)(metaspace_base + compressed_class_space_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else
#endif
  {
    higher_address = metaspace_base + compressed_class_space_size();
    lower_base = metaspace_base;

    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
    // If compressed class space fits in lower 32G, we don't need a base.
    if (higher_address <= (address)klass_encoding_max) {
      lower_base = 0; // Effectively lower base is zero.
    }
  }

  Universe::set_narrow_klass_base(lower_base);

  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how dump time narrow_klass_shift is set. Although CDS can also work
  // in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass
  // shift to be consistent with AOT, so archived Java heap objects can be
  // used together with AOT code.
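  // In short: without CDS, a zero shift is chosen iff the distance from
  // lower_base to higher_address fits into the unscaled 4G encoding range
  // (UnscaledClassSpaceMax); with CDS, the shift is always
  // LogKlassAlignmentInBytes.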
  if (!UseSharedSpaces
      && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    Universe::set_narrow_klass_shift(0);
  } else {
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
  AOTLoader::set_narrow_klass_shift();
}

#if INCLUDE_CDS
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                                (address)(metaspace_base + compressed_class_space_size()));
  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
#endif

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(!DumpSharedSpaces, "compressed klass space is allocated by MetaspaceShared class.");
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
  assert_is_aligned(requested_addr, _reserve_alignment);
  assert_is_aligned(cds_base, _reserve_alignment);
  assert_is_aligned(compressed_class_space_size(), _reserve_alignment);

  // Don't use large pages for the class space.
  bool large_pages = false;

#if !(defined(AARCH64) || defined(AIX))
  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                             _reserve_alignment,
                                             large_pages,
                                             requested_addr);
#else // AARCH64
  ReservedSpace metaspace_rs;

  // Our compressed klass pointers may fit nicely into the lower 32
  // bits.
  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
    metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                 _reserve_alignment,
                                 large_pages,
                                 requested_addr);
  }

  if (!metaspace_rs.is_reserved()) {
    // Aarch64: Try to align metaspace so that we can decode a compressed
    // klass with a single MOVK instruction. We can do this iff the
    // compressed class base is a multiple of 4G.
    // Aix: Search for a place where we can find memory. If we need to load
    // the base, 4G alignment is helpful, too.
    size_t increment = AARCH64_ONLY(4*)G;
    for (char *a = align_up(requested_addr, increment);
         a < (char*)(1024*G);
         a += increment) {
      if (a == (char *)(32*G)) {
        // Go faster from here on. Zero-based is no longer possible.
        increment = 4*G;
      }

#if INCLUDE_CDS
      if (UseSharedSpaces
          && !can_use_cds_with_metaspace_addr(a, cds_base)) {
        // We failed to find an aligned base that will reach. Fall
        // back to using our requested addr.
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment,
                                     large_pages,
                                     requested_addr);
        break;
      }
#endif

      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment,
                                   large_pages,
                                   a);
      if (metaspace_rs.is_reserved())
        break;
    }
  }

#endif // AARCH64

  if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      size_t increment = align_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr);
      }
    }
#endif
    // If no successful allocation then try to allocate the space anywhere. If
    // that fails then OOM doom. At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

#if INCLUDE_CDS
  // Verify that we can use shared spaces. Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }
#endif
  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    print_compressed_class_space(&ls, requested_addr);
  }
}

void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap. The argument passed in is at the base of the compressed space.
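// (For orientation, a sketch of the initialization order as implemented in
// this file: ergo_initialize() fixes _commit_alignment and _reserve_alignment,
// then global_initialize() reserves the space and calls
// allocate_metaspace_compressed_klass_ptrs(), which in turn calls
// initialize_class_space() below to build the class VirtualSpaceList and the
// class ChunkManager.)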
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(true/*is_class*/);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment  = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
  // override whether MaxMetaspaceSize was set on the command line or not.
  // This information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);

  // Initial virtual space size will be calculated at global_initialize()
  size_t min_metaspace_sz =
      VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
  if (UseCompressedClassPointers) {
    if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
      if (min_metaspace_sz >= MaxMetaspaceSize) {
        vm_exit_during_initialization("MaxMetaspaceSize is too small.");
      } else {
        FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
                      MaxMetaspaceSize - min_metaspace_sz);
      }
    }
  } else if (min_metaspace_sz >= MaxMetaspaceSize) {
    FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
                  min_metaspace_sz);
  }

  set_compressed_class_space_size(CompressedClassSpaceSize);
}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

#if INCLUDE_CDS
  if (DumpSharedSpaces) {
    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
  } else if (UseSharedSpaces) {
    // If any of the archived spaces fails to map, UseSharedSpaces
    // is reset to false. Fall through to the
    // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
    // metaspace.
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
  }

  if (!DumpSharedSpaces && !UseSharedSpaces)
#endif // INCLUDE_CDS
  {
#ifdef _LP64
    if (using_class_space()) {
      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64
  }

  // Initialize these before initializing the VirtualSpaceList
  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list. The next chunk will be small and progress
  // from there. This size was calculated by running with -version.
  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                      (CompressedClassSpaceSize/BytesPerWord)*2);
  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  // Arbitrarily set the initial virtual space to a multiple
  // of the boot class loader size.
  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  word_size = align_up(word_size, Metaspace::reserve_alignment_words());

  // Initialize the list of virtual spaces.
  _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false/*metaspace*/);

  if (!_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::verify_global_initialization() {
  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");

  if (using_class_space()) {
    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
  }
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {
  assert(!_frozen, "sanity");
  assert(!(DumpSharedSpaces && THREAD->is_VM_thread()), "sanity");

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    if (DumpSharedSpaces) {
      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
      // We should abort to avoid generating a potentially bad archive.
      vm_exit_during_cds_dumping(err_msg("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
                                         MetaspaceObj::type_name(type), word_size * BytesPerWord),
                                 err_msg("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize));
    }
    report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
    assert(HAS_PENDING_EXCEPTION, "sanity");
    return NULL;
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != NULL) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

// ClassLoaderMetaspace

ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
  : _space_type(type)
  , _lock(lock)
  , _vsm(NULL)
  , _class_vsm(NULL)
{
  initialize(lock, type);
}

ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  Metaspace::assert_not_frozen();
  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
  delete _vsm;
  if (Metaspace::using_class_space()) {
    delete _class_vsm;
  }
}

void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist
  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  if (chunk == NULL) {
    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                             get_space_manager(mdtype)->medium_chunk_bunch());
  }

  return chunk;
}

void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));

  // Allocate SpaceManager for metadata objects.
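  // (_vsm backs all non-class metadata; when compressed class pointers are in
  // use, Klass structures are instead carved out of _class_vsm, cf. the mdtype
  // dispatch in Metaspace::allocate() and ClassLoaderMetaspace::allocate().)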
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects.
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects.
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}

MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));

  // Don't use class_vsm() unless UseCompressedClassPointers is true.
  if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM, and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}
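// Note on the two accounting methods below: allocated_blocks_bytes() counts
// words actually handed out to callers, while allocated_chunks_bytes() counts
// the capacity of all chunks owned by this metaspace; the difference is space
// still free inside those chunks.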
size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
          (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
}

void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  Metaspace::assert_not_frozen();
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && Metaspace::using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}

size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
  assert(Metaspace::using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void ClassLoaderMetaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (Metaspace::using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

void ClassLoaderMetaspace::verify() {
  vsm()->verify();
  if (Metaspace::using_class_space()) {
    class_vsm()->verify();
  }
}

void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
  assert_lock_strong(lock());
  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
  if (Metaspace::using_class_space()) {
    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
  }
}

void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceUtilsTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceUtils::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceUtils::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceUtils::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceUtils::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
    // Request a size larger than VirtualSpaceSize (256k), plus one extra page so
    // that the size is _not_ vm_allocation_granularity-aligned on Windows.
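    // Sizes passed to get_new_chunk() are in words, which is why the page size
    // is divided by BytesPerWord before being added.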
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceUtils_test() {
  TestMetaspaceUtilsTest::test();
}

#endif // !PRODUCT

struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};

extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManagerStatistics stat;
  chunk_manager->collect_statistics(&stat);
  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
}

struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};

extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
  if (mdType == Metaspace::NonClassType) {
    out->specialized_chunk_word_size = SpecializedChunk;
    out->small_chunk_word_size = SmallChunk;
    out->medium_chunk_word_size = MediumChunk;
  } else {
    out->specialized_chunk_word_size = ClassSpecializedChunk;
    out->small_chunk_word_size = ClassSmallChunk;
    out->medium_chunk_word_size = ClassMediumChunk;
  }
}
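// Usage sketch for the test hooks above (hypothetical caller, e.g. a gtest;
// the surrounding test framing is illustrative only and not part of this file):
//
//   chunkmanager_statistics_t stat;
//   test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stat);
//   // Counts are a snapshot of the chunk manager's freelist, never negative.
//   assert(stat.num_specialized_chunks >= 0, "sanity");
//
//   chunk_geometry_t geom;
//   test_metaspace_retrieve_chunk_geometry(Metaspace::NonClassType, &geom);
//   // Chunk word sizes grow from specialized to small to medium.
//   assert(geom.specialized_chunk_word_size < geom.small_chunk_word_size, "ordered");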