/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "aot/aotLoader.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"


using namespace metaspace;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

static const char* space_type_name(Metaspace::MetaspaceType t) {
  const char* s = NULL;
  switch (t) {
    case Metaspace::StandardMetaspaceType: s = "Standard"; break;
    case Metaspace::BootMetaspaceType: s = "Boot"; break;
    case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
    case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
    default: ShouldNotReachHere();
  }
  return s;
}

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// BlockFreelist methods

// VirtualSpaceNode methods

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.
// The current implementation is based on the flags MinMetaspaceFreeRatio and
// MaxMetaspaceFreeRatio used to resize the Java heap by some GC's.  New flags
// can be implemented if really needed.  MinMetaspaceFreeRatio is used to
// calculate how much free space is desirable in the metaspace capacity to
// decide how much to increase the HWM.  MaxMetaspaceFreeRatio is used to
// decide how much free space is desirable in the metaspace capacity before
// decreasing the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
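// Worked example (assuming the 64-bit defaults MinMetaspaceExpansion = 256K
// and MaxMetaspaceExpansion = 4M): a 100K request is rounded up to 256K; a 1M
// request returns 4M, so the next allocation does not immediately run into the
// HWM again; a 6M request returns 6M plus the 256K minimum.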
size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  // Use the same value that new_value was computed from as the expected value;
  // re-reading _capacity_until_GC here could race with a concurrent update and
  // install a new_value based on a stale read.
  size_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, capacity_until_GC);

  if (capacity_until_GC != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}
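// Note: can_expand() and allowed_expansion() are meant to be used together by
// the code that commits metaspace memory (e.g. VirtualSpaceList::expand_by()):
// first check that expanding is permitted at all, then cap the amount actually
// committed by allowed_expansion().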
void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM than desired,
    // increase the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
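  // Worked example (illustrative numbers): with used_after_gc = 60M and
  // MaxMetaspaceFreeRatio = 70, minimum_used_percentage is 0.3 and
  // maximum_desired_capacity is 60M / 0.3 = 200M. A capacity_until_GC of 260M
  // would give shrink_bytes = 60M before damping; the damping below shrinks by
  // 0% on the first eligible GC, then 10%, 40% and 100% on subsequent ones.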
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

// MetaspaceUtils
size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};
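// Locking discipline for the counters above: _capacity_words and
// _overhead_words are only modified while MetaspaceExpand_lock is held (see
// the inc/dec_stat_nonatomically() helpers below), whereas _used_words is
// updated lock-free via Atomic::add/sub.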
// Collect used metaspace statistics. This involves walking the CLDG. The resulting
// output will be the accumulated values for all live metaspaces.
// Note: method does not do any locking.
void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
  out->reset();
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      msp->add_to_statistics(out);
    }
  }
}

size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceUtils::free_in_vs_bytes() {
  return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
}

static void inc_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  (*pstat) += words;
}

static void dec_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  *pstat = size_now - words;
}

static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
  Atomic::add(words, pstat);
}

static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  Atomic::sub(words, pstat);
}

void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_overhead_words[mdtype], words);
}
void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_overhead_words[mdtype], words);
}
size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}

size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}

size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  if (chunk_manager == NULL) {
    return 0;
  }
  chunk_manager->slow_verify();
  return chunk_manager->free_chunks_total_words();
}

size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total_words(mdtype) * BytesPerWord;
}

size_t MetaspaceUtils::free_chunks_total_words() {
  return free_chunks_total_words(Metaspace::ClassType) +
         free_chunks_total_words(Metaspace::NonClassType);
}

size_t MetaspaceUtils::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
  return Metaspace::get_chunk_manager(mdtype) != NULL;
}

MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
  if (!has_chunk_free_list(mdtype)) {
    return MetaspaceChunkFreeListSummary();
  }

  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
  return cm->chunk_free_list_summary();
}

void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
  log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                          prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
}

void MetaspaceUtils::print_on(outputStream* out) {
  Metaspace::MetadataType nct = Metaspace::NonClassType;

  out->print_cr(" Metaspace "
                "used " SIZE_FORMAT "K, "
                "capacity " SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved " SIZE_FORMAT "K",
                used_bytes()/K,
                capacity_bytes()/K,
                committed_bytes()/K,
                reserved_bytes()/K);

  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space "
                  "used " SIZE_FORMAT "K, "
                  "capacity " SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved " SIZE_FORMAT "K",
                  used_bytes(ct)/K,
                  capacity_bytes(ct)/K,
                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}
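// Example output of print_on() (illustrative numbers, derived from the format
// strings above):
//  Metaspace used 21425K, capacity 22272K, committed 22400K, reserved 1069056K
//   class space used 2341K, capacity 2560K, committed 2688K, reserved 1048576K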
void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  {
    if (Metaspace::using_class_space()) {
      out->print(" Non-class space: ");
    }
    print_scaled_words(out, reserved_nonclass_words, scale, 7);
    out->print(" reserved, ");
    print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
    out->print_cr(" committed ");

    if (Metaspace::using_class_space()) {
      const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      out->print(" Class space: ");
      print_scaled_words(out, reserved_class_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
      out->print_cr(" committed ");

      const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
      const size_t committed_words = committed_nonclass_words + committed_class_words;
      out->print(" Both: ");
      print_scaled_words(out, reserved_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
      out->print_cr(" committed ");
    }
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {

  out->cr();
  out->print_cr("Usage:");

  if (Metaspace::using_class_space()) {
    out->print(" Non-class: ");
  }

  // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
  // MetaspaceUtils.
  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;

  print_scaled_words(out, cap_nc, scale, 5);
  out->print(" capacity, ");
  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
  out->print(" used, ");
  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
  out->print(" free+waste, ");
  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
  out->print(" overhead. ");
  out->cr();

  if (Metaspace::using_class_space()) {
    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
    out->print(" Class: ");
    print_scaled_words(out, cap_c, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
    out->print(" overhead. ");
    out->cr();

    out->print(" Both: ");
    const size_t cap = cap_nc + cap_c;

    print_scaled_words(out, cap, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
    out->print(" overhead. ");
    out->cr();
  }

  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  out->cr();
  out->print_cr("Chunk freelists:");

  if (Metaspace::using_class_space()) {
    out->print(" Non-Class: ");
  }
  print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print(" Class: ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
    out->cr();
    out->print(" Both: ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words() +
                                   Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
    out->cr();
  }
  out->cr();

}

void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {

  const bool print_loaders = (flags & rf_show_loaders) > 0;
  const bool print_classes = (flags & rf_show_classes) > 0;
  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;

  // Some report options require walking the class loader data graph.
  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
  if (print_loaders) {
    out->cr();
    out->print_cr("Usage per loader:");
    out->cr();
  }

  ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print

  // Print totals, broken up by space type.
  if (print_by_spacetype) {
    out->cr();
    out->print_cr("Usage per space type:");
    out->cr();
    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
         space_type < (int)Metaspace::MetaspaceTypeCount; space_type++)
    {
      uintx num = cl._num_loaders_by_spacetype[space_type];
      out->print("%s (" UINTX_FORMAT " loader%s)%c",
                 space_type_name((Metaspace::MetaspaceType)space_type),
                 num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
      if (num > 0) {
        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
      }
      out->cr();
    }
  }

  // Print totals for in-use data:
  out->cr();
  out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
                cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));

  cl._stats_total.print_on(out, scale, print_by_chunktype);

  // -- Print Virtual space.
  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  // -- Print VirtualSpaceList details.
  if ((flags & rf_show_vslist) > 0) {
    out->cr();
    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");

    if (Metaspace::using_class_space()) {
      out->print_cr(" Non-Class:");
    }
    Metaspace::space_list()->print_on(out, scale);
    if (Metaspace::using_class_space()) {
      out->print_cr(" Class:");
      Metaspace::class_space_list()->print_on(out, scale);
    }
  }
  out->cr();

  // -- Print VirtualSpaceList map.
  if ((flags & rf_show_vsmap) > 0) {
    out->cr();
    out->print_cr("Virtual space map:");

    if (Metaspace::using_class_space()) {
      out->print_cr(" Non-Class:");
    }
    Metaspace::space_list()->print_map(out);
    if (Metaspace::using_class_space()) {
      out->print_cr(" Class:");
      Metaspace::class_space_list()->print_map(out);
    }
  }
  out->cr();

  // -- Print Freelists (ChunkManager) details
  out->cr();
  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");

  ChunkManagerStatistics non_class_cm_stat;
  Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);

  if (Metaspace::using_class_space()) {
    out->print_cr(" Non-Class:");
  }
  non_class_cm_stat.print_on(out, scale);

  if (Metaspace::using_class_space()) {
    ChunkManagerStatistics class_cm_stat;
    Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
    out->print_cr(" Class:");
    class_cm_stat.print_on(out, scale);
  }

  // As a convenience, print a summary of common waste.
  out->cr();
  out->print("Waste ");
  // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace.
  const size_t committed_words = committed_bytes() / BytesPerWord;

  out->print("(percentages refer to total committed size ");
  print_scaled_words(out, committed_words, scale);
  out->print_cr("):");

  // Print space committed but not yet used by any class loader
  const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
  out->print(" Committed unused: ");
  print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
  out->cr();

  // Print waste for in-use chunks.
  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
  UsedChunksStatistics ucs_all;
  ucs_all.add(ucs_nonclass);
  ucs_all.add(ucs_class);

  out->print(" Waste in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
  out->cr();
  out->print(" Free in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
  out->cr();
  out->print(" Overhead in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
  out->cr();

  // Print waste in free chunks.
  const size_t total_capacity_in_free_chunks =
    Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
    (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
  out->print(" In free chunks: ");
  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
  out->cr();

  // Print waste in deallocated blocks.
  const uintx free_blocks_num =
    cl._stats_total.nonclass_sm_stats().free_blocks_num() +
    cl._stats_total.class_sm_stats().free_blocks_num();
  const size_t free_blocks_cap_words =
    cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
    cl._stats_total.class_sm_stats().free_blocks_cap_words();
  out->print("Deallocated from chunks in use: ");
  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
  out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
  out->cr();

  // Print total waste.
  const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
                             + free_blocks_cap_words + unused_words_in_vs;
  out->print(" -total-: ");
  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
  out->cr();

  // Print internal statistics
#ifdef ASSERT
  out->cr();
  out->cr();
  out->print_cr("Internal statistics:");
  out->cr();
  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
  out->cr();
#endif

  // Print some interesting settings
  out->cr();
  out->cr();
  out->print("MaxMetaspaceSize: ");
  print_human_readable_size(out, MaxMetaspaceSize, scale);
  out->cr();
  out->print("InitialBootClassLoaderMetaspaceSize: ");
  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
  out->cr();

  out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("CompressedClassSpaceSize: ");
    print_human_readable_size(out, CompressedClassSpaceSize, scale);
  }

  out->cr();
  out->cr();

} // MetaspaceUtils::print_report()
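// Illustrative call (hypothetical call site; the rf_* flags are the report
// flags used by print_report() above, and scale follows the
// print_scaled_words() convention):
//   MetaspaceUtils::print_report(tty, K, MetaspaceUtils::rf_show_loaders |
//                                        MetaspaceUtils::rf_break_down_by_chunktype);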
// Prints an ASCII representation of the given space.
void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  const bool for_class = (mdtype == Metaspace::ClassType);
  VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
  if (vsl != NULL) {
    if (for_class) {
      if (!Metaspace::using_class_space()) {
        out->print_cr("No Class Space.");
        return;
      }
      out->print_raw("---- Metaspace Map (Class Space) ----");
    } else {
      out->print_raw("---- Metaspace Map (Non-Class Space) ----");
    }
    // Print legend:
    out->cr();
    out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
    out->cr();
    vsl->print_map(out);
    out->cr();
  }
}

void MetaspaceUtils::verify_free_chunks() {
  Metaspace::chunk_manager_metadata()->verify();
  if (Metaspace::using_class_space()) {
    Metaspace::chunk_manager_class()->verify();
  }
}

void MetaspaceUtils::verify_metrics() {
#ifdef ASSERT
  // Please note: there are time windows where the internal counters are out of sync with
  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
  // not be counted when iterating the CLDG. So be careful when you call this method.
  ClassLoaderMetaspaceStatistics total_stat;
  collect_statistics(&total_stat);
  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();

  bool mismatch = false;
  for (int i = 0; i < Metaspace::MetadataTypeCount; i++) {
    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
    if (capacity_words(mdtype) != chunk_stat.cap() ||
        used_words(mdtype) != chunk_stat.used() ||
        overhead_words(mdtype) != chunk_stat.overhead()) {
      mismatch = true;
      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
      tty->flush();
    }
  }
  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
#endif
}


// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
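  // Worked example (illustrative): with a 1G class space reserved at 28G,
  // higher_address is 29G. Since 29G <= 32G (UnscaledClassSpaceMax shifted by
  // LogKlassAlignmentInBytes), lower_base becomes 0 and encoding is zero-based.
  // The distance 29G - 0 still exceeds UnscaledClassSpaceMax (4G), so the shift
  // is LogKlassAlignmentInBytes: klass = (address)(narrow_klass << shift).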
  address lower_base;
  address higher_address;
#if INCLUDE_CDS
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                          (address)(metaspace_base + compressed_class_space_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else
#endif
  {
    higher_address = metaspace_base + compressed_class_space_size();
    lower_base = metaspace_base;

    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
    // If compressed class space fits in lower 32G, we don't need a base.
    if (higher_address <= (address)klass_encoding_max) {
      lower_base = 0; // Effectively lower base is zero.
    }
  }

  Universe::set_narrow_klass_base(lower_base);

  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how dump time narrow_klass_shift is set. Although CDS can work
  // with zero-shift mode also, to be consistent with AOT it uses
  // LogKlassAlignmentInBytes for klass shift so archived java heap objects
  // can be used at the same time as AOT code.
  if (!UseSharedSpaces
      && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    Universe::set_narrow_klass_shift(0);
  } else {
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
  AOTLoader::set_narrow_klass_shift();
}

#if INCLUDE_CDS
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                                (address)(metaspace_base + compressed_class_space_size()));
  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
#endif

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
  assert_is_aligned(requested_addr, _reserve_alignment);
  assert_is_aligned(cds_base, _reserve_alignment);
  assert_is_aligned(compressed_class_space_size(), _reserve_alignment);

  // Don't use large pages for the class space.
  bool large_pages = false;

#if !(defined(AARCH64) || defined(AIX))
  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                             _reserve_alignment,
                                             large_pages,
                                             requested_addr);
#else // AARCH64 || AIX
  ReservedSpace metaspace_rs;

  // Our compressed klass pointers may fit nicely into the lower 32
  // bits.
  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
    metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                 _reserve_alignment,
                                 large_pages,
                                 requested_addr);
  }
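  // (Orientation, not from the original comments:) the loop below probes
  // candidate base addresses: on AArch64 in 4G steps from the start, on AIX in
  // 1G steps until 32G (beyond which zero-based encoding is impossible) and in
  // 4G steps from there, giving up above 1024G.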
  if (!metaspace_rs.is_reserved()) {
    // Aarch64: Try to align metaspace so that we can decode a compressed
    // klass with a single MOVK instruction. We can do this iff the
    // compressed class base is a multiple of 4G.
    // Aix: Search for a place where we can find memory. If we need to load
    // the base, 4G alignment is helpful, too.
    size_t increment = AARCH64_ONLY(4*)G;
    for (char *a = align_up(requested_addr, increment);
         a < (char*)(1024*G);
         a += increment) {
      if (a == (char *)(32*G)) {
        // Go faster from here on. Zero-based is no longer possible.
        increment = 4*G;
      }

#if INCLUDE_CDS
      if (UseSharedSpaces
          && !can_use_cds_with_metaspace_addr(a, cds_base)) {
        // We failed to find an aligned base that will reach.  Fall
        // back to using our requested addr.
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment,
                                     large_pages,
                                     requested_addr);
        break;
      }
#endif

      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment,
                                   large_pages,
                                   a);
      if (metaspace_rs.is_reserved())
        break;
    }
  }

#endif // AARCH64 || AIX

  if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      size_t increment = align_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr);
      }
    }
#endif
    // If no successful allocation then try to allocate the space anywhere.  If
    // that fails then OOM doom.  At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

#if INCLUDE_CDS
  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }
#endif
  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    print_compressed_class_space(&ls, requested_addr);
  }
}

void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " < " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(true/*is_class*/);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment  = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
  // override whether MaxMetaspaceSize was set on the command line or not.
  // This information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
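  // For example (illustrative values): with 4K pages and no large pages,
  // _commit_alignment is 4K; on Windows, where os::vm_allocation_granularity()
  // is 64K, _reserve_alignment becomes 64K, so the flag rounding below is done
  // in 4K and 64K units respectively.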
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);

  // Initial virtual space size will be calculated at global_initialize()
  size_t min_metaspace_sz =
      VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
  if (UseCompressedClassPointers) {
    if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
      if (min_metaspace_sz >= MaxMetaspaceSize) {
        vm_exit_during_initialization("MaxMetaspaceSize is too small.");
      } else {
        FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
                      MaxMetaspaceSize - min_metaspace_sz);
      }
    }
  } else if (min_metaspace_sz >= MaxMetaspaceSize) {
    FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
                  min_metaspace_sz);
  }

  set_compressed_class_space_size(CompressedClassSpaceSize);
}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

#if INCLUDE_CDS
  if (DumpSharedSpaces) {
    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
  } else if (UseSharedSpaces) {
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false. Fall through to the
    // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
    // metaspace.
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
  }

  if (!DumpSharedSpaces && !UseSharedSpaces)
#endif // INCLUDE_CDS
  {
#ifdef _LP64
    if (using_class_space()) {
      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64
  }

  // Initialize these before initializing the VirtualSpaceList
  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list.  The next chunk will be small and progress
  // from there.  This size was determined empirically (by running -version).
  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                      (CompressedClassSpaceSize/BytesPerWord)*2);
  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  // Arbitrarily set the initial virtual space to a multiple
  // of the boot class loader size.
  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  word_size = align_up(word_size, Metaspace::reserve_alignment_words());
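  // Illustrative arithmetic (64-bit, assuming the default
  // InitialBootClassLoaderMetaspaceSize of 4M): _first_chunk_word_size is about
  // 512K words, so the initial virtual space below is roughly 1M words, i.e.
  // 8M, before reserve alignment.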
  // Initialize the list of virtual spaces.
  _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false/*is_class*/);

  if (!_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::verify_global_initialization() {
  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");

  if (using_class_space()) {
    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
  }
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {
  assert(!_frozen, "sanity");
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
      // Only start a GC if the bootstrapping has completed.
      // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
      // the VM thread.

      // Try to clean out some memory and retry.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    if (DumpSharedSpaces) {
      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
      // We should abort to avoid generating a potentially bad archive.
      tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
                    MetaspaceObj::type_name(type), word_size * BytesPerWord);
      tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
      vm_exit(1);
    }
    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  return result;
}
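// For orientation (a sketch of the typical call path, not a new API): metadata
// such as Method or ConstantPool is allocated via MetaspaceObj::operator new,
// which funnels into Metaspace::allocate() above with the owning
// ClassLoaderData and the object's MetaspaceObj::Type.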
void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != NULL) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

// ClassLoaderMetaspace

ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
  : _lock(lock)
  , _space_type(type)
  , _vsm(NULL)
  , _class_vsm(NULL)
{
  initialize(lock, type);
}

ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
  delete _vsm;
  if (Metaspace::using_class_space()) {
    delete _class_vsm;
  }
}

void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist
  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  if (chunk == NULL) {
    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                             get_space_manager(mdtype)->medium_chunk_bunch());
  }

  return chunk;
}

void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}

MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));

  // Don't use class_vsm() unless UseCompressedClassPointers is true.
  if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL);

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}

size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}
size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
          (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
}

void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  Metaspace::assert_not_frozen();
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && Metaspace::using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}

size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
  assert(Metaspace::using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void ClassLoaderMetaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (Metaspace::using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

void ClassLoaderMetaspace::verify() {
  vsm()->verify();
  if (Metaspace::using_class_space()) {
    class_vsm()->verify();
  }
}

void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
  assert_lock_strong(lock());
  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
  if (Metaspace::using_class_space()) {
    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
  }
}

void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceUtilsTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceUtils::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceUtils::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceUtils::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceUtils::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }
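  // A sketch tying the two tests above together (illustrative only; the
  // helper name is hypothetical and not part of MetaspaceUtils): since
  // committed_bytes() <= reserved_bytes() always holds, a utilization ratio
  // derived from the same queries is guaranteed to land in [0.0, 1.0].
  //
  //   static double committed_ratio() {
  //     size_t reserved  = MetaspaceUtils::reserved_bytes();
  //     size_t committed = MetaspaceUtils::committed_bytes();
  //     return reserved == 0 ? 0.0 : (double)committed / (double)reserved;
  //   }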
  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(MetaspaceExpand_lock,
                     Mutex::_no_safepoint_check_flag);
    // Choose a size larger than VirtualSpaceSize (256K), then add one page so
    // the size is _not_ vm_allocation_granularity-aligned on Windows.
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceUtils_test() {
  TestMetaspaceUtilsTest::test();
}
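// A worked example for the chunk_up() helper in the test class below
// (illustrative; it assumes the usual non-class chunk geometry of 128 words
// (specialized), 512 words (small) and 8K words (medium) -- see
// test_metaspace_retrieve_chunk_geometry() at the end of this file):
//
//   words_left = 1664
//   num_medium_chunks      = 1664 / 8192 = 0, remainder 1664
//   num_small_chunks       = 1664 /  512 = 3, remainder  128
//   num_specialized_chunks =  128 /  128 = 1, remainder    0
//
// The final remainder is always zero because each chunk size divides the next
// larger one, which is exactly what the STATIC_ASSERTs in test() check.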
class TestVirtualSpaceNodeTest {
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // How many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }

 public:
  static void test() {
    MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail.
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN.
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks.
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
    // This doesn't work for systems with vm_page_size >= 16K.
    if (page_chunks < MediumChunk) {
      // 4 pages of VSN is committed, some is used by chunks.
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);

      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used.
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk.
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }
  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }
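  // The three is_available() tests in this class probe the same boundary from
  // different sides. A sketch of the expected behavior (illustrative, not the
  // actual VirtualSpaceNode implementation): with C committed words and
  // nothing yet handed out,
  //
  //   vsn.is_available(n)  ==  (n <= C)
  //
  // so commit_word_size and commit_word_size / 2 are accepted above, while
  // 2 * commit_word_size and the deliberately overflowing size below must be
  // rejected without wrapping around.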
  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept a too large size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

#endif // !PRODUCT

struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};

extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManagerStatistics stat;
  chunk_manager->collect_statistics(&stat);
  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
}

struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};

extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
  if (mdType == Metaspace::NonClassType) {
    out->specialized_chunk_word_size = SpecializedChunk;
    out->small_chunk_word_size = SmallChunk;
    out->medium_chunk_word_size = MediumChunk;
  } else {
    out->specialized_chunk_word_size = ClassSpecializedChunk;
    out->small_chunk_word_size = ClassSmallChunk;
    out->medium_chunk_word_size = ClassMediumChunk;
  }
}
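// A consumer-side sketch (illustrative only, not part of this file): an
// external test, e.g. a gtest, could use the hooks above by repeating the
// declarations, since no shared header exports them.
//
//   extern void test_metaspace_retrieve_chunkmanager_statistics(
//       Metaspace::MetadataType mdType, chunkmanager_statistics_t* out);
//
//   chunkmanager_statistics_t stat;
//   test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stat);
//   // stat.num_small_chunks etc. now hold a snapshot of the free chunk lists.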