1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "aot/aotLoader.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/filemap.hpp"
  31 #include "memory/metaspace.hpp"
  32 #include "memory/metaspace/chunkManager.hpp"
  33 #include "memory/metaspace/metachunk.hpp"
  34 #include "memory/metaspace/metaspaceCommon.hpp"
  35 #include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
  36 #include "memory/metaspace/spaceManager.hpp"
  37 #include "memory/metaspace/virtualSpaceList.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/orderAccess.inline.hpp"
  42 #include "services/memTracker.hpp"
  43 #include "utilities/copy.hpp"
  44 #include "utilities/debug.hpp"
  45 #include "utilities/globalDefinitions.hpp"
  46 
  47 
  48 using namespace metaspace;
  49 
  50 MetaWord* last_allocated = NULL;
  51 
  52 size_t Metaspace::_compressed_class_space_size;
  53 const MetaspaceTracer* Metaspace::_tracer = NULL;
  54 
  55 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  56 
  57 static const char* space_type_name(Metaspace::MetaspaceType t) {
  58   const char* s = NULL;
  59   switch (t) {
  60     case Metaspace::StandardMetaspaceType: s = "Standard"; break;
  61     case Metaspace::BootMetaspaceType: s = "Boot"; break;
  62     case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
  63     case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
  64     default: ShouldNotReachHere();
  65   }
  66   return s;
  67 }
  68 
  69 volatile size_t MetaspaceGC::_capacity_until_GC = 0;
  70 uint MetaspaceGC::_shrink_factor = 0;
  71 bool MetaspaceGC::_should_concurrent_collect = false;
  72 
  73 // BlockFreelist methods
  74 
  75 // VirtualSpaceNode methods
  76 
  77 // MetaspaceGC methods
  78 
  79 // VM_CollectForMetadataAllocation is the vm operation used to GC.
  80 // Within the VM operation after the GC the attempt to allocate the metadata
  81 // should succeed.  If the GC did not free enough space for the metaspace
  82 // allocation, the HWM is increased so that another virtualspace will be
  83 // allocated for the metadata.  With the perm gen, increases were bounded by
  84 // MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
  85 // metaspace policy uses those as the small and large steps for the HWM.
  86 //
  87 // After the GC the compute_new_size() for MetaspaceGC is called to
  88 // resize the capacity of the metaspaces.  The current implementation
  89 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
  90 // to resize the Java heap by some GC's.  New flags can be implemented
  91 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
  92 // free space is desirable in the metaspace capacity to decide how much
  93 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
  94 // free space is desirable in the metaspace capacity before decreasing
  95 // the HWM.
  96 
  97 // Calculate the amount to increase the high water mark (HWM).
  98 // Increase by a minimum amount (MinMetaspaceExpansion) so that
  99 // another expansion is not requested too soon.  If that is not
 100 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
 101 // If that is still not enough, expand by the size of the allocation
 102 // plus some.
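     //
     // Illustrative example (values chosen for illustration, not the actual flag
     // defaults; assumes MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M,
     // both multiples of the commit alignment):
     //   - a 100K request rounds up to the commit alignment and, being <= 256K,
     //     grows the HWM by 256K;
     //   - a 1M request is above 256K but <= 4M, so the HWM grows by 4M;
     //   - a 6M request exceeds 4M, so the HWM grows by 6M plus the 256K minimum.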
 103 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
 104   size_t min_delta = MinMetaspaceExpansion;
 105   size_t max_delta = MaxMetaspaceExpansion;
 106   size_t delta = align_up(bytes, Metaspace::commit_alignment());
 107 
 108   if (delta <= min_delta) {
 109     delta = min_delta;
 110   } else if (delta <= max_delta) {
 111     // Don't want to hit the high water mark on the next
 112     // allocation so make the delta greater than just enough
 113     // for this allocation.
 114     delta = max_delta;
 115   } else {
 116     // This allocation is large but the next ones are probably not
 117     // so increase by the minimum.
 118     delta = delta + min_delta;
 119   }
 120 
 121   assert_is_aligned(delta, Metaspace::commit_alignment());
 122 
 123   return delta;
 124 }
 125 
 126 size_t MetaspaceGC::capacity_until_GC() {
 127   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
 128   assert(value >= MetaspaceSize, "Not initialized properly?");
 129   return value;
 130 }
 131 
 132 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
 133   assert_is_aligned(v, Metaspace::commit_alignment());
 134 
 135   size_t old_capacity_until_GC = _capacity_until_GC;
 136   size_t new_value = old_capacity_until_GC + v;
 137 
 138   if (new_value < old_capacity_until_GC) {
 139     // The addition wrapped around, set new_value to aligned max value.
 140     new_value = align_down(max_uintx, Metaspace::commit_alignment());
 141   }
 142 
 143   size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
 144 
 145   if (old_capacity_until_GC != prev_value) {
 146     return false;
 147   }
 148 
 149   if (new_cap_until_GC != NULL) {
 150     *new_cap_until_GC = new_value;
 151   }
 152   if (old_cap_until_GC != NULL) {
 153     *old_cap_until_GC = old_capacity_until_GC;
 154   }
 155   return true;
 156 }
 157 
 158 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
 159   assert_is_aligned(v, Metaspace::commit_alignment());
 160 
 161   return Atomic::sub(v, &_capacity_until_GC);
 162 }
 163 
 164 void MetaspaceGC::initialize() {
 165   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
 166   // we can't do a GC during initialization.
 167   _capacity_until_GC = MaxMetaspaceSize;
 168 }
 169 
 170 void MetaspaceGC::post_initialize() {
 171   // Reset the high-water mark once the VM initialization is done.
 172   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
 173 }
 174 
 175 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
 176   // Check if the compressed class space is full.
 177   if (is_class && Metaspace::using_class_space()) {
 178     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
 179     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
 180       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
 181                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
 182       return false;
 183     }
 184   }
 185 
 186   // Check if the user has imposed a limit on the metaspace memory.
 187   size_t committed_bytes = MetaspaceUtils::committed_bytes();
 188   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
 189     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
 190               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
 191     return false;
 192   }
 193 
 194   return true;
 195 }
 196 
 197 size_t MetaspaceGC::allowed_expansion() {
 198   size_t committed_bytes = MetaspaceUtils::committed_bytes();
 199   size_t capacity_until_gc = capacity_until_GC();
 200 
 201   assert(capacity_until_gc >= committed_bytes,
 202          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
 203          capacity_until_gc, committed_bytes);
 204 
 205   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
 206   size_t left_until_GC = capacity_until_gc - committed_bytes;
 207   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
 208   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
 209             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
 210             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
 211 
 212   return left_to_commit / BytesPerWord;
 213 }
 214 
 215 void MetaspaceGC::compute_new_size() {
 216   assert(_shrink_factor <= 100, "invalid shrink factor");
 217   uint current_shrink_factor = _shrink_factor;
 218   _shrink_factor = 0;
 219 
 220   // Using committed_bytes() for used_after_gc is an overestimation, since the
 221   // chunk free lists are included in committed_bytes() and the memory in an
 222   // un-fragmented chunk free list is available for future allocations.
 223 // However, if the chunk free lists become fragmented, then the memory may
 224   // not be available for future allocations and the memory is therefore "in use".
 225   // Including the chunk free lists in the definition of "in use" is therefore
 226   // necessary. Not including the chunk free lists can cause capacity_until_GC to
 227   // shrink below committed_bytes() and this has caused serious bugs in the past.
 228   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
 229   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 230 
 231   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
 232   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
 233 
 234   const double min_tmp = used_after_gc / maximum_used_percentage;
 235   size_t minimum_desired_capacity =
 236     (size_t)MIN2(min_tmp, double(max_uintx));
 237   // Don't shrink below the initial metaspace size (MetaspaceSize)
 238   minimum_desired_capacity = MAX2(minimum_desired_capacity,
 239                                   MetaspaceSize);
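       // Illustrative numbers (assuming MinMetaspaceFreeRatio = 40): with 60M
       // committed after the GC, maximum_used_percentage is 0.6 and the minimum
       // desired capacity is 60M / 0.6 = 100M (or MetaspaceSize, if that is larger).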
 240 
 241   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
 242   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
 243                            minimum_free_percentage, maximum_used_percentage);
 244   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
 245 
 246 
 247   size_t shrink_bytes = 0;
 248   if (capacity_until_GC < minimum_desired_capacity) {
 249     // The current capacity (HWM) is below the minimum desired capacity,
 250     // so increase the HWM.
 251     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
 252     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
 253     // Don't expand unless it's significant
 254     if (expand_bytes >= MinMetaspaceExpansion) {
 255       size_t new_capacity_until_GC = 0;
 256       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
 257       assert(succeeded, "Should always successfully increment HWM when at safepoint");
 258 
 259       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
 260                                                new_capacity_until_GC,
 261                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
 262       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
 263                                minimum_desired_capacity / (double) K,
 264                                expand_bytes / (double) K,
 265                                MinMetaspaceExpansion / (double) K,
 266                                new_capacity_until_GC / (double) K);
 267     }
 268     return;
 269   }
 270 
 271   // No expansion, now see if we want to shrink
 272   // We would never want to shrink more than this
 273   assert(capacity_until_GC >= minimum_desired_capacity,
 274          SIZE_FORMAT " >= " SIZE_FORMAT,
 275          capacity_until_GC, minimum_desired_capacity);
 276   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
 277 
 278   // Should shrinking be considered?
 279   if (MaxMetaspaceFreeRatio < 100) {
 280     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
 281     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
 282     const double max_tmp = used_after_gc / minimum_used_percentage;
 283     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
 284     maximum_desired_capacity = MAX2(maximum_desired_capacity,
 285                                     MetaspaceSize);
 286     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
 287                              maximum_free_percentage, minimum_used_percentage);
 288     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
 289                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
 290 
 291     assert(minimum_desired_capacity <= maximum_desired_capacity,
 292            "sanity check");
 293 
 294     if (capacity_until_GC > maximum_desired_capacity) {
 295       // Capacity too large, compute shrinking size
 296       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
 297       // We don't want to shrink all the way back to MetaspaceSize if people call
 298       // System.gc(), because some programs do that between "phases" and then
 299       // we'd just have to grow the metaspace again for the next phase.  So we
 300       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 301       // on the third call, and 100% by the fourth call.  But if we recompute
 302       // size without shrinking, it goes back to 0%.
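           // Illustrative progression: on repeated shrinking calls the factor goes
           // 0 -> 10 -> 40 -> 100 percent; e.g. with a 100M excess the first call
           // removes nothing, the next removes 10% of the then-current excess,
           // then 40%, then all of it.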
 303       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
 304 
 305       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
 306 
 307       assert(shrink_bytes <= max_shrink_bytes,
 308              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
 309              shrink_bytes, max_shrink_bytes);
 310       if (current_shrink_factor == 0) {
 311         _shrink_factor = 10;
 312       } else {
 313         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
 314       }
 315       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
 316                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
 317       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
 318                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
 319     }
 320   }
 321 
 322   // Don't shrink unless it's significant
 323   if (shrink_bytes >= MinMetaspaceExpansion &&
 324       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
 325     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
 326     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
 327                                              new_capacity_until_GC,
 328                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
 329   }
 330 }
 331 
 332 // MetaspaceUtils
 333 size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
 334 size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
 335 volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};
 336 
 337 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
 338 // output will be the accumulated values for all live metaspaces.
 339 // Note: method does not do any locking.
 340 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
 341   out->reset();
 342   ClassLoaderDataGraphMetaspaceIterator iter;
 343   while (iter.repeat()) {
 344     ClassLoaderMetaspace* msp = iter.get_next();
 345     if (msp != NULL) {
 346       msp->add_to_statistics(out);
 347     }
 348   }
 349 }
 350 
 351 size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
 352   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
 353   return list == NULL ? 0 : list->free_bytes();
 354 }
 355 
 356 size_t MetaspaceUtils::free_in_vs_bytes() {
 357   return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
 358 }
 359 
 360 static void inc_stat_nonatomically(size_t* pstat, size_t words) {
 361   assert_lock_strong(MetaspaceExpand_lock);
 362   (*pstat) += words;
 363 }
 364 
 365 static void dec_stat_nonatomically(size_t* pstat, size_t words) {
 366   assert_lock_strong(MetaspaceExpand_lock);
 367   const size_t size_now = *pstat;
 368   assert(size_now >= words, "About to decrement counter below zero "
 369          "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
 370          size_now, words);
 371   *pstat = size_now - words;
 372 }
 373 
 374 static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
 375   Atomic::add(words, pstat);
 376 }
 377 
 378 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
 379   const size_t size_now = *pstat;
 380   assert(size_now >= words, "About to decrement counter below zero "
 381          "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
 382          size_now, words);
 383   Atomic::sub(words, pstat);
 384 }
 385 
 386 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
 387   dec_stat_nonatomically(&_capacity_words[mdtype], words);
 388 }
 389 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
 390   inc_stat_nonatomically(&_capacity_words[mdtype], words);
 391 }
 392 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
 393   dec_stat_atomically(&_used_words[mdtype], words);
 394 }
 395 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
 396   inc_stat_atomically(&_used_words[mdtype], words);
 397 }
 398 void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
 399   dec_stat_nonatomically(&_overhead_words[mdtype], words);
 400 }
 401 void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
 402   inc_stat_nonatomically(&_overhead_words[mdtype], words);
 403 }
 404 
 405 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
 406   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
 407   return list == NULL ? 0 : list->reserved_bytes();
 408 }
 409 
 410 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
 411   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
 412   return list == NULL ? 0 : list->committed_bytes();
 413 }
 414 
 415 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
 416 
 417 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
 418   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
 419   if (chunk_manager == NULL) {
 420     return 0;
 421   }
 422   chunk_manager->slow_verify();
 423   return chunk_manager->free_chunks_total_words();
 424 }
 425 
 426 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
 427   return free_chunks_total_words(mdtype) * BytesPerWord;
 428 }
 429 
 430 size_t MetaspaceUtils::free_chunks_total_words() {
 431   return free_chunks_total_words(Metaspace::ClassType) +
 432          free_chunks_total_words(Metaspace::NonClassType);
 433 }
 434 
 435 size_t MetaspaceUtils::free_chunks_total_bytes() {
 436   return free_chunks_total_words() * BytesPerWord;
 437 }
 438 
 439 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
 440   return Metaspace::get_chunk_manager(mdtype) != NULL;
 441 }
 442 
 443 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
 444   if (!has_chunk_free_list(mdtype)) {
 445     return MetaspaceChunkFreeListSummary();
 446   }
 447 
 448   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
 449   return cm->chunk_free_list_summary();
 450 }
 451 
 452 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
 453   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
 454                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
 455 }
 456 
 457 void MetaspaceUtils::print_on(outputStream* out) {
 458   Metaspace::MetadataType nct = Metaspace::NonClassType;
 459 
 460   out->print_cr(" Metaspace       "
 461                 "used "      SIZE_FORMAT "K, "
 462                 "capacity "  SIZE_FORMAT "K, "
 463                 "committed " SIZE_FORMAT "K, "
 464                 "reserved "  SIZE_FORMAT "K",
 465                 used_bytes()/K,
 466                 capacity_bytes()/K,
 467                 committed_bytes()/K,
 468                 reserved_bytes()/K);
 469 
 470   if (Metaspace::using_class_space()) {
 471     Metaspace::MetadataType ct = Metaspace::ClassType;
 472     out->print_cr("  class space    "
 473                   "used "      SIZE_FORMAT "K, "
 474                   "capacity "  SIZE_FORMAT "K, "
 475                   "committed " SIZE_FORMAT "K, "
 476                   "reserved "  SIZE_FORMAT "K",
 477                   used_bytes(ct)/K,
 478                   capacity_bytes(ct)/K,
 479                   committed_bytes(ct)/K,
 480                   reserved_bytes(ct)/K);
 481   }
 482 }
 483 
 484 
 485 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
 486   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
 487   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
 488   {
 489     if (Metaspace::using_class_space()) {
 490       out->print("  Non-class space:  ");
 491     }
 492     print_scaled_words(out, reserved_nonclass_words, scale, 7);
 493     out->print(" reserved, ");
 494     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
 495     out->print_cr(" committed ");
 496 
 497     if (Metaspace::using_class_space()) {
 498       const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
 499       const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
 500       out->print("      Class space:  ");
 501       print_scaled_words(out, reserved_class_words, scale, 7);
 502       out->print(" reserved, ");
 503       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
 504       out->print_cr(" committed ");
 505 
 506       const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
 507       const size_t committed_words = committed_nonclass_words + committed_class_words;
 508       out->print("             Both:  ");
 509       print_scaled_words(out, reserved_words, scale, 7);
 510       out->print(" reserved, ");
 511       print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
 512       out->print_cr(" committed ");
 513     }
 514   }
 515 }
 516 
 517 // This will print out a basic metaspace usage report but
 518 // unlike print_report() is guaranteed not to lock or to walk the CLDG.
 519 void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
 520 
 521   out->cr();
 522   out->print_cr("Usage:");
 523 
 524   if (Metaspace::using_class_space()) {
 525     out->print("  Non-class:  ");
 526   }
 527 
 528   // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
 529   // MetaspaceUtils.
 530   const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
 531   const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
 532   const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
 533   const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
 534 
 535   print_scaled_words(out, cap_nc, scale, 5);
 536   out->print(" capacity, ");
 537   print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
 538   out->print(" used, ");
 539   print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
 540   out->print(" free+waste, ");
 541   print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
 542   out->print(" overhead. ");
 543   out->cr();
 544 
 545   if (Metaspace::using_class_space()) {
 546     const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
 547     const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
 548     const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
 549     const size_t free_and_waste_c = cap_c - overhead_c - used_c;
 550     out->print("      Class:  ");
 551     print_scaled_words(out, cap_c, scale, 5);
 552     out->print(" capacity, ");
 553     print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
 554     out->print(" used, ");
 555     print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
 556     out->print(" free+waste, ");
 557     print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
 558     out->print(" overhead. ");
 559     out->cr();
 560 
 561     out->print("       Both:  ");
 562     const size_t cap = cap_nc + cap_c;
 563 
 564     print_scaled_words(out, cap, scale, 5);
 565     out->print(" capacity, ");
 566     print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
 567     out->print(" used, ");
 568     print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
 569     out->print(" free+waste, ");
 570     print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
 571     out->print(" overhead. ");
 572     out->cr();
 573   }
 574 
 575   out->cr();
 576   out->print_cr("Virtual space:");
 577 
 578   print_vs(out, scale);
 579 
 580   out->cr();
 581   out->print_cr("Chunk freelists:");
 582 
 583   if (Metaspace::using_class_space()) {
 584     out->print("   Non-Class:  ");
 585   }
 586   print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
 587   out->cr();
 588   if (Metaspace::using_class_space()) {
 589     out->print("       Class:  ");
 590     print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
 591     out->cr();
 592     out->print("        Both:  ");
 593     print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words() +
 594                               Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
 595     out->cr();
 596   }
 597   out->cr();
 598 
 599 }
 600 
 601 void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
 602 
 603   const bool print_loaders = (flags & rf_show_loaders) > 0;
 604   const bool print_classes = (flags & rf_show_classes) > 0;
 605   const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
 606   const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
 607 
 608   // Some report options require walking the class loader data graph.
 609   PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
 610   if (print_loaders) {
 611     out->cr();
 612     out->print_cr("Usage per loader:");
 613     out->cr();
 614   }
 615 
 616   ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print
 617 
 618   // Print totals, broken up by space type.
 619   if (print_by_spacetype) {
 620     out->cr();
 621     out->print_cr("Usage per space type:");
 622     out->cr();
 623     for (int space_type = (int)Metaspace::ZeroMetaspaceType;
 624          space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
 625     {
 626       uintx num = cl._num_loaders_by_spacetype[space_type];
 627       out->print("%s (" UINTX_FORMAT " loader%s)%c",
 628         space_type_name((Metaspace::MetaspaceType)space_type),
 629         num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
 630       if (num > 0) {
 631         cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
 632       }
 633       out->cr();
 634     }
 635   }
 636 
 637   // Print totals for in-use data:
 638   out->cr();
 639   out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
 640       cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));
 641 
 642   cl._stats_total.print_on(out, scale, print_by_chunktype);
 643 
 644   // -- Print Virtual space.
 645   out->cr();
 646   out->print_cr("Virtual space:");
 647 
 648   print_vs(out, scale);
 649 
 650   // -- Print VirtualSpaceList details.
 651   if ((flags & rf_show_vslist) > 0) {
 652     out->cr();
 653     out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
 654 
 655     if (Metaspace::using_class_space()) {
 656       out->print_cr("   Non-Class:");
 657     }
 658     Metaspace::space_list()->print_on(out, scale);
 659     if (Metaspace::using_class_space()) {
 660       out->print_cr("       Class:");
 661       Metaspace::class_space_list()->print_on(out, scale);
 662     }
 663   }
 664   out->cr();
 665 
 666   // -- Print VirtualSpaceList map.
 667   if ((flags & rf_show_vsmap) > 0) {
 668     out->cr();
 669     out->print_cr("Virtual space map:");
 670 
 671     if (Metaspace::using_class_space()) {
 672       out->print_cr("   Non-Class:");
 673     }
 674     Metaspace::space_list()->print_map(out);
 675     if (Metaspace::using_class_space()) {
 676       out->print_cr("       Class:");
 677       Metaspace::class_space_list()->print_map(out);
 678     }
 679   }
 680   out->cr();
 681 
 682   // -- Print Freelists (ChunkManager) details
 683   out->cr();
 684   out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
 685 
 686   ChunkManagerStatistics non_class_cm_stat;
 687   Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
 688 
 689   if (Metaspace::using_class_space()) {
 690     out->print_cr("   Non-Class:");
 691   }
 692   non_class_cm_stat.print_on(out, scale);
 693 
 694   if (Metaspace::using_class_space()) {
 695     ChunkManagerStatistics class_cm_stat;
 696     Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
 697     out->print_cr("       Class:");
 698     class_cm_stat.print_on(out, scale);
 699   }
 700 
 701   // As a convenience, print a summary of common waste.
 702   out->cr();
 703   out->print("Waste ");
 704   // For all waste categories, print percentages of the total committed metaspace size.
 705   const size_t committed_words = committed_bytes() / BytesPerWord;
 706 
 707   out->print("(percentages refer to total committed size ");
 708   print_scaled_words(out, committed_words, scale);
 709   out->print_cr("):");
 710 
 711   // Print space committed but not yet used by any class loader
 712   const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
 713   out->print("              Committed unused: ");
 714   print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
 715   out->cr();
 716 
 717   // Print waste for in-use chunks.
 718   UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
 719   UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
 720   UsedChunksStatistics ucs_all;
 721   ucs_all.add(ucs_nonclass);
 722   ucs_all.add(ucs_class);
 723 
 724   out->print("        Waste in chunks in use: ");
 725   print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
 726   out->cr();
 727   out->print("         Free in chunks in use: ");
 728   print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
 729   out->cr();
 730   out->print("     Overhead in chunks in use: ");
 731   print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
 732   out->cr();
 733 
 734   // Print waste in free chunks.
 735   const size_t total_capacity_in_free_chunks =
 736       Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
 737      (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
 738   out->print("                In free chunks: ");
 739   print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
 740   out->cr();
 741 
 742   // Print waste in deallocated blocks.
 743   const uintx free_blocks_num =
 744       cl._stats_total.nonclass_sm_stats().free_blocks_num() +
 745       cl._stats_total.class_sm_stats().free_blocks_num();
 746   const size_t free_blocks_cap_words =
 747       cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
 748       cl._stats_total.class_sm_stats().free_blocks_cap_words();
 749   out->print("Deallocated from chunks in use: ");
 750   print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
 751   out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
 752   out->cr();
 753 
 754   // Print total waste.
 755   const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
 756       + free_blocks_cap_words + unused_words_in_vs;
 757   out->print("                       -total-: ");
 758   print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
 759   out->cr();
 760 
 761   // Print internal statistics
 762 #ifdef ASSERT
 763   out->cr();
 764   out->cr();
 765   out->print_cr("Internal statistics:");
 766   out->cr();
 767   out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
 768   out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
 769   out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
 770   out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
 771   out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
 772   out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
 773   out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
 774   out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
 775   out->cr();
 776 #endif
 777 
 778   // Print some interesting settings
 779   out->cr();
 780   out->cr();
 781   out->print("MaxMetaspaceSize: ");
 782   print_human_readable_size(out, MaxMetaspaceSize, scale);
 783   out->cr();
 784   out->print("InitialBootClassLoaderMetaspaceSize: ");
 785   print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
 786   out->cr();
 787 
 788   out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
 789   out->cr();
 790   if (Metaspace::using_class_space()) {
 791     out->print("CompressedClassSpaceSize: ");
 792     print_human_readable_size(out, CompressedClassSpaceSize, scale);
 793   }
 794 
 795   out->cr();
 796   out->cr();
 797 
 798 } // MetaspaceUtils::print_report()
 799 
 800 // Prints an ASCII representation of the given space.
 801 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
 802   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
 803   const bool for_class = (mdtype == Metaspace::ClassType);
 804   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
 805   if (vsl != NULL) {
 806     if (for_class) {
 807       if (!Metaspace::using_class_space()) {
 808         out->print_cr("No Class Space.");
 809         return;
 810       }
 811       out->print_raw("---- Metaspace Map (Class Space) ----");
 812     } else {
 813       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
 814     }
 815     // Print legend:
 816     out->cr();
 817     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
 818     out->cr();
 819     // Print the map of the space list selected above.
 820     vsl->print_map(out);
 821     out->cr();
 822   }
 823 }
 824 
 825 void MetaspaceUtils::verify_free_chunks() {
 826   Metaspace::chunk_manager_metadata()->verify();
 827   if (Metaspace::using_class_space()) {
 828     Metaspace::chunk_manager_class()->verify();
 829   }
 830 }
 831 
 832 void MetaspaceUtils::verify_metrics() {
 833 #ifdef ASSERT
 834   // Please note: there are time windows where the internal counters are out of sync with
 835   // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
 836   // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
 837   // not be counted when iterating the CLDG. So be careful when you call this method.
 838   ClassLoaderMetaspaceStatistics total_stat;
 839   collect_statistics(&total_stat);
 840   UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
 841   UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
 842 
 843   bool mismatch = false;
 844   for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
 845     Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
 846     UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
 847     if (capacity_words(mdtype) != chunk_stat.cap() ||
 848         used_words(mdtype) != chunk_stat.used() ||
 849         overhead_words(mdtype) != chunk_stat.overhead()) {
 850       mismatch = true;
 851       tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
 852       tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
 853                     capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
 854       tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
 855                     chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
 856       tty->flush();
 857     }
 858   }
 859   assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
 860 #endif
 861 }
 862 
 863 
 864 // Metaspace methods
 865 
 866 size_t Metaspace::_first_chunk_word_size = 0;
 867 size_t Metaspace::_first_class_chunk_word_size = 0;
 868 
 869 size_t Metaspace::_commit_alignment = 0;
 870 size_t Metaspace::_reserve_alignment = 0;
 871 
 872 VirtualSpaceList* Metaspace::_space_list = NULL;
 873 VirtualSpaceList* Metaspace::_class_space_list = NULL;
 874 
 875 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
 876 ChunkManager* Metaspace::_chunk_manager_class = NULL;
 877 
 878 #define VIRTUALSPACEMULTIPLIER 2
 879 
 880 #ifdef _LP64
 881 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
 882 
 883 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
 884   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
 885   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
 886   // narrow_klass_base is the lower of the metaspace base and the cds base
 887   // (if cds is enabled).  The narrow_klass_shift depends on the distance
 888   // between the lower base and higher address.
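       // Illustrative example (assumes the usual 8-byte Klass alignment, i.e.
       // LogKlassAlignmentInBytes == 3, so UnscaledClassSpaceMax spans 4G and
       // klass_encoding_max spans 32G): without CDS, a class space that ends at
       // or below 32G allows a zero base, and a distance of at most 4G between
       // lower_base and higher_address allows a zero shift.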
 889   address lower_base;
 890   address higher_address;
 891 #if INCLUDE_CDS
 892   if (UseSharedSpaces) {
 893     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
 894                           (address)(metaspace_base + compressed_class_space_size()));
 895     lower_base = MIN2(metaspace_base, cds_base);
 896   } else
 897 #endif
 898   {
 899     higher_address = metaspace_base + compressed_class_space_size();
 900     lower_base = metaspace_base;
 901 
 902     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
 903     // If compressed class space fits in lower 32G, we don't need a base.
 904     if (higher_address <= (address)klass_encoding_max) {
 905       lower_base = 0; // Effectively lower base is zero.
 906     }
 907   }
 908 
 909   Universe::set_narrow_klass_base(lower_base);
 910 
 911   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
 912   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
 913   // how dump time narrow_klass_shift is set. Although, CDS can work
 914   // with zero-shift mode also, to be consistent with AOT it uses
 915   // LogKlassAlignmentInBytes for klass shift so archived java heap objects
 916   // can be used at same time as AOT code.
 917   if (!UseSharedSpaces
 918       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
 919     Universe::set_narrow_klass_shift(0);
 920   } else {
 921     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
 922   }
 923   AOTLoader::set_narrow_klass_shift();
 924 }
 925 
 926 #if INCLUDE_CDS
 927 // Return TRUE if the specified metaspace_base and cds_base are close enough
 928 // to work with compressed klass pointers.
 929 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
 930   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
 931   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
 932   address lower_base = MIN2((address)metaspace_base, cds_base);
 933   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
 934                                 (address)(metaspace_base + compressed_class_space_size()));
 935   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
 936 }
 937 #endif
 938 
 939 // Try to allocate the metaspace at the requested addr.
 940 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
 941   assert(!DumpSharedSpaces, "compressed class space is allocated by the MetaspaceShared class.");
 942   assert(using_class_space(), "called improperly");
 943   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
 944   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
 945          "Metaspace size is too big");
 946   assert_is_aligned(requested_addr, _reserve_alignment);
 947   assert_is_aligned(cds_base, _reserve_alignment);
 948   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
 949 
 950   // Don't use large pages for the class space.
 951   bool large_pages = false;
 952 
 953 #if !(defined(AARCH64) || defined(AIX))
 954   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
 955                                              _reserve_alignment,
 956                                              large_pages,
 957                                              requested_addr);
 958 #else // AARCH64 || AIX
 959   ReservedSpace metaspace_rs;
 960 
 961   // Our compressed klass pointers may fit nicely into the lower 32
 962   // bits.
 963   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
 964     metaspace_rs = ReservedSpace(compressed_class_space_size(),
 965                                  _reserve_alignment,
 966                                  large_pages,
 967                                  requested_addr);
 968   }
 969 
 970   if (!metaspace_rs.is_reserved()) {
 971     // Aarch64: Try to align metaspace so that we can decode a compressed
 972     // klass with a single MOVK instruction.  We can do this iff the
 973     // compressed class base is a multiple of 4G.
 974     // Aix: Search for a place where we can find memory. If we need to load
 975     // the base, 4G alignment is helpful, too.
 976     size_t increment = AARCH64_ONLY(4*)G;
 977     for (char *a = align_up(requested_addr, increment);
 978          a < (char*)(1024*G);
 979          a += increment) {
 980       if (a == (char *)(32*G)) {
 981         // Go faster from here on. Zero-based is no longer possible.
 982         increment = 4*G;
 983       }
 984 
 985 #if INCLUDE_CDS
 986       if (UseSharedSpaces
 987           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
 988         // We failed to find an aligned base that will reach.  Fall
 989         // back to using our requested addr.
 990         metaspace_rs = ReservedSpace(compressed_class_space_size(),
 991                                      _reserve_alignment,
 992                                      large_pages,
 993                                      requested_addr);
 994         break;
 995       }
 996 #endif
 997 
 998       metaspace_rs = ReservedSpace(compressed_class_space_size(),
 999                                    _reserve_alignment,
1000                                    large_pages,
1001                                    a);
1002       if (metaspace_rs.is_reserved())
1003         break;
1004     }
1005   }
1006 
1007 #endif // AARCH64 || AIX
1008 
1009   if (!metaspace_rs.is_reserved()) {
1010 #if INCLUDE_CDS
1011     if (UseSharedSpaces) {
1012       size_t increment = align_up(1*G, _reserve_alignment);
1013 
1014       // Keep trying to allocate the metaspace, increasing the requested_addr
1015       // by 1GB each time, until we reach an address that will no longer allow
1016       // use of CDS with compressed klass pointers.
1017       char *addr = requested_addr;
1018       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
1019              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
1020         addr = addr + increment;
1021         metaspace_rs = ReservedSpace(compressed_class_space_size(),
1022                                      _reserve_alignment, large_pages, addr);
1023       }
1024     }
1025 #endif
1026     // If there was no successful allocation, try to allocate the space anywhere.  If
1027     // that also fails, we are out of memory.  At this point we cannot try allocating the
1028     // metaspace as if UseCompressedClassPointers is off because too much
1029     // initialization has happened that depends on UseCompressedClassPointers.
1030     // So, UseCompressedClassPointers cannot be turned off at this point.
1031     if (!metaspace_rs.is_reserved()) {
1032       metaspace_rs = ReservedSpace(compressed_class_space_size(),
1033                                    _reserve_alignment, large_pages);
1034       if (!metaspace_rs.is_reserved()) {
1035         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
1036                                               compressed_class_space_size()));
1037       }
1038     }
1039   }
1040 
1041   // If we got here then the metaspace got allocated.
1042   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
1043 
1044 #if INCLUDE_CDS
1045   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
1046   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
1047     FileMapInfo::stop_sharing_and_unmap(
1048         "Could not allocate metaspace at a compatible address");
1049   }
1050 #endif
1051   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
1052                                   UseSharedSpaces ? (address)cds_base : 0);
1053 
1054   initialize_class_space(metaspace_rs);
1055 
1056   LogTarget(Trace, gc, metaspace) lt;
1057   if (lt.is_enabled()) {
1058     ResourceMark rm;
1059     LogStream ls(lt);
1060     print_compressed_class_space(&ls, requested_addr);
1061   }
1062 }
1063 
1064 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
1065   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
1066                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
1067   if (_class_space_list != NULL) {
1068     address base = (address)_class_space_list->current_virtual_space()->bottom();
1069     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
1070                  compressed_class_space_size(), p2i(base));
1071     if (requested_addr != 0) {
1072       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
1073     }
1074     st->cr();
1075   }
1076 }
1077 
1078 // For UseCompressedClassPointers the class space is reserved above the top of
1079 // the Java heap.  The argument passed in is at the base of the compressed space.
1080 void Metaspace::initialize_class_space(ReservedSpace rs) {
1081   // The reserved space size may be bigger because of alignment, especially with UseLargePages.
1082   assert(rs.size() >= CompressedClassSpaceSize,
1083          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
1084   assert(using_class_space(), "Must be using class space");
1085   _class_space_list = new VirtualSpaceList(rs);
1086   _chunk_manager_class = new ChunkManager(true/*is_class*/);
1087 
1088   if (!_class_space_list->initialization_succeeded()) {
1089     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
1090   }
1091 }
1092 
1093 #endif
1094 
1095 void Metaspace::ergo_initialize() {
1096   if (DumpSharedSpaces) {
1097     // Using large pages when dumping the shared archive is currently not implemented.
1098     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
1099   }
1100 
1101   size_t page_size = os::vm_page_size();
1102   if (UseLargePages && UseLargePagesInMetaspace) {
1103     page_size = os::large_page_size();
1104   }
1105 
1106   _commit_alignment  = page_size;
1107   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
1108 
1109   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
1110   // obscure whether MaxMetaspaceSize was set on the command line or not.
1111   // This information is needed later to conform to the specification of the
1112   // java.lang.management.MemoryUsage API.
1113   //
1114   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
1115   // globals.hpp to the aligned value, but this is not possible, since the
1116   // alignment depends on other flags being parsed.
1117   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
1118 
1119   if (MetaspaceSize > MaxMetaspaceSize) {
1120     MetaspaceSize = MaxMetaspaceSize;
1121   }
1122 
1123   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
1124 
1125   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
1126 
1127   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
1128   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
1129 
1130   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
1131 
1132   // Initial virtual space size will be calculated at global_initialize()
1133   size_t min_metaspace_sz =
1134       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
1135   if (UseCompressedClassPointers) {
1136     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
1137       if (min_metaspace_sz >= MaxMetaspaceSize) {
1138         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
1139       } else {
1140         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
1141                       MaxMetaspaceSize - min_metaspace_sz);
1142       }
1143     }
1144   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
1145     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
1146                   min_metaspace_sz);
1147   }
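       // Illustrative example for the ergonomics above (values for illustration
       // only, not defaults): with InitialBootClassLoaderMetaspaceSize = 4M,
       // min_metaspace_sz is 8M; if MaxMetaspaceSize were 100M and
       // CompressedClassSpaceSize 1G, the latter would be lowered to 92M so that
       // both together still fit under MaxMetaspaceSize.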
1148 
1149   set_compressed_class_space_size(CompressedClassSpaceSize);
1150 }
1151 
1152 void Metaspace::global_initialize() {
1153   MetaspaceGC::initialize();
1154 
1155 #if INCLUDE_CDS
1156   if (DumpSharedSpaces) {
1157     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
1158   } else if (UseSharedSpaces) {
1159     // If any of the archived space fails to map, UseSharedSpaces
1160     // is reset to false. Fall through to the
1161     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
1162     // metaspace.
1163     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
1164   }
1165 
1166   if (!DumpSharedSpaces && !UseSharedSpaces)
1167 #endif // INCLUDE_CDS
1168   {
1169 #ifdef _LP64
1170     if (using_class_space()) {
1171       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
1172       allocate_metaspace_compressed_klass_ptrs(base, 0);
1173     }
1174 #endif // _LP64
1175   }
1176 
1177   // Initialize these before initializing the VirtualSpaceList
1178   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
1179   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
1180   // Make the first class chunk bigger than a medium chunk so it's not put
1181   // on the medium chunk list.  The next chunk will be small and progress
1182   // from there.  This size was determined by running -version.
1183   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
1184                                      (CompressedClassSpaceSize/BytesPerWord)*2);
1185   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
1186   // Arbitrarily set the initial virtual space to a multiple
1187   // of the boot class loader size.
1188   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
1189   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
1190 
1191   // Initialize the list of virtual spaces.
1192   _space_list = new VirtualSpaceList(word_size);
1193   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
1194 
1195   if (!_space_list->initialization_succeeded()) {
1196     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
1197   }
1198 
1199   _tracer = new MetaspaceTracer();
1200 }
1201 
1202 void Metaspace::post_initialize() {
1203   MetaspaceGC::post_initialize();
1204 }
1205 
1206 void Metaspace::verify_global_initialization() {
1207   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
1208   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
1209 
1210   if (using_class_space()) {
1211     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
1212     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
1213   }
1214 }
1215 
1216 size_t Metaspace::align_word_size_up(size_t word_size) {
1217   size_t byte_size = word_size * wordSize;
1218   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
1219 }
1220 
1221 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
1222                               MetaspaceObj::Type type, TRAPS) {
1223   assert(!_frozen, "sanity");
1224   if (HAS_PENDING_EXCEPTION) {
1225     assert(false, "Should not allocate with exception pending");
1226     return NULL;  // caller does a CHECK_NULL too
1227   }
1228 
1229   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
1230         "ClassLoaderData::the_null_class_loader_data() should have been used.");
1231 
1232   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
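  // Klass objects (MetaspaceObj::ClassType) are placed in the class space when
  // compressed class pointers are in use; all other metadata goes to the
  // non-class (data) space.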
1233 
1234   // Try to allocate metadata.
1235   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
1236 
1237   if (result == NULL) {
1238     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
1239 
1240     // Allocation failed.
1241     if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
1242       // Only start a GC if the bootstrapping has completed.
1243       // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
1244       // the VM thread.
1245 
1246       // Try to clean out some memory and retry.
1247       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
1248     }
1249   }
1250 
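  // If even a GC (where one was attempted) could not make room, give up:
  // abort the CDS dump, or report the OOM condition and throw.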
1251   if (result == NULL) {
1252     if (DumpSharedSpaces) {
1253       // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
1254       // We should abort to avoid generating a potentially bad archive.
1255       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
1256           MetaspaceObj::type_name(type), word_size * BytesPerWord);
1257       tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
1258       vm_exit(1);
1259     }
1260     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
1261   }
1262 
1263   // Zero initialize.
1264   Copy::fill_to_words((HeapWord*)result, word_size, 0);
1265 
1266   return result;
1267 }
1268 
1269 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
1270   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
1271 
1272   // If result is still null, we are out of memory.
1273   Log(gc, metaspace, freelist, oom) log;
1274   if (log.is_info()) {
1275     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
1276              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
1277     ResourceMark rm;
1278     if (log.is_debug()) {
1279       if (loader_data->metaspace_or_null() != NULL) {
1280         LogStream ls(log.debug());
1281         loader_data->print_value_on(&ls);
1282       }
1283     }
1284     LogStream ls(log.info());
1285     // In case of an OOM, log out a short but still useful report.
1286     MetaspaceUtils::print_basic_report(&ls, 0);
1287   }
1288 
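  // Decide whether the failure should be attributed to the compressed class space:
  // would committing the chunk needed for this allocation push the committed class
  // space beyond CompressedClassSpaceSize?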
1289   bool out_of_compressed_class_space = false;
1290   if (is_class_space_allocation(mdtype)) {
1291     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
1292     out_of_compressed_class_space =
1293       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
1294       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
1295       CompressedClassSpaceSize;
1296   }
1297 
1298   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
1299   const char* space_string = out_of_compressed_class_space ?
1300     "Compressed class space" : "Metaspace";
1301 
1302   report_java_out_of_memory(space_string);
1303 
1304   if (JvmtiExport::should_post_resource_exhausted()) {
1305     JvmtiExport::post_resource_exhausted(
1306         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
1307         space_string);
1308   }
1309 
1310   if (!is_init_completed()) {
1311     vm_exit_during_initialization("OutOfMemoryError", space_string);
1312   }
1313 
1314   if (out_of_compressed_class_space) {
1315     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
1316   } else {
1317     THROW_OOP(Universe::out_of_memory_error_metaspace());
1318   }
1319 }
1320 
1321 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
1322   switch (mdtype) {
1323     case Metaspace::ClassType: return "Class";
1324     case Metaspace::NonClassType: return "Metadata";
1325     default:
1326       assert(false, "Got bad mdtype: %d", (int) mdtype);
1327       return NULL;
1328   }
1329 }
1330 
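// Purging walks the virtual space list and releases nodes that contain only free
// chunks back to the OS, removing those chunks from the corresponding ChunkManager.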
1331 void Metaspace::purge(MetadataType mdtype) {
1332   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
1333 }
1334 
1335 void Metaspace::purge() {
1336   MutexLockerEx cl(MetaspaceExpand_lock,
1337                    Mutex::_no_safepoint_check_flag);
1338   purge(NonClassType);
1339   if (using_class_space()) {
1340     purge(ClassType);
1341   }
1342 }
1343 
1344 bool Metaspace::contains(const void* ptr) {
1345   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
1346     return true;
1347   }
1348   return contains_non_shared(ptr);
1349 }
1350 
1351 bool Metaspace::contains_non_shared(const void* ptr) {
1352   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
1353      return true;
1354   }
1355 
1356   return get_space_list(NonClassType)->contains(ptr);
1357 }
1358 
1359 // ClassLoaderMetaspace
1360 
1361 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
1362   : _lock(lock)
1363   , _space_type(type)
1364   , _vsm(NULL)
1365   , _class_vsm(NULL)
1366 {
1367   initialize(lock, type);
1368 }
1369 
1370 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
1371   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
1372   delete _vsm;
1373   if (Metaspace::using_class_space()) {
1374     delete _class_vsm;
1375   }
1376 }
1377 
1378 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
1379   Metachunk* chunk = get_initialization_chunk(type, mdtype);
1380   if (chunk != NULL) {
1381     // Add to this manager's list of chunks in use and make it the current_chunk().
1382     get_space_manager(mdtype)->add_chunk(chunk, true);
1383   }
1384 }
1385 
1386 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
1387   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
1388 
1389   // Get a chunk from the chunk freelist
1390   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
1391 
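  // If the freelist could not satisfy the request, carve a new chunk out of the
  // current virtual space (growing the VirtualSpaceList if necessary).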
1392   if (chunk == NULL) {
1393     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
1394                                                   get_space_manager(mdtype)->medium_chunk_bunch());
1395   }
1396 
1397   return chunk;
1398 }
1399 
1400 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
1401   Metaspace::verify_global_initialization();
1402 
1403   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
1404 
1405   // Allocate SpaceManager for metadata objects.
1406   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
1407 
1408   if (Metaspace::using_class_space()) {
1409     // Allocate SpaceManager for classes.
1410     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
1411   }
1412 
1413   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
1414 
1415   // Allocate chunk for metadata objects
1416   initialize_first_chunk(type, Metaspace::NonClassType);
1417 
1418   // Allocate chunk for class metadata objects
1419   if (Metaspace::using_class_space()) {
1420     initialize_first_chunk(type, Metaspace::ClassType);
1421   }
1422 }
1423 
1424 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1425   Metaspace::assert_not_frozen();
1426 
1427   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
1428 
1429   // Don't use class_vsm() unless UseCompressedClassPointers is true.
1430   if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
1434   }
1435 }
1436 
1437 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1438   Metaspace::assert_not_frozen();
1439   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
1440   assert(delta_bytes > 0, "Must be");
1441 
1442   size_t before = 0;
1443   size_t after = 0;
1444   MetaWord* res;
1445   bool incremented;
1446 
1447   // Each thread increments the HWM at most once. Even if the thread fails to increment
1448   // the HWM, an allocation is still attempted. This is because another thread must then
1449   // have incremented the HWM and therefore the allocation might still succeed.
1450   do {
1451     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
1452     res = allocate(word_size, mdtype);
1453   } while (!incremented && res == NULL);
1454 
1455   if (incremented) {
1456     Metaspace::tracer()->report_gc_threshold(before, after,
1457                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
1458     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
1459   }
1460 
1461   return res;
1462 }
1463 
1464 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
1465   return (vsm()->used_words() +
1466       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
1467 }
1468 
1469 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
1470   return (vsm()->capacity_words() +
1471       (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
1472 }
1473 
1474 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
1475   Metaspace::assert_not_frozen();
1476   assert(!SafepointSynchronize::is_at_safepoint()
1477          || Thread::current()->is_VM_thread(), "should be the VM thread");
1478 
1479   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
1480 
1481   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
1482 
1483   if (is_class && Metaspace::using_class_space()) {
1484     class_vsm()->deallocate(ptr, word_size);
1485   } else {
1486     vsm()->deallocate(ptr, word_size);
1487   }
1488 }
1489 
1490 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
1491   assert(Metaspace::using_class_space(), "Has to use class space");
1492   return class_vsm()->calc_chunk_size(word_size);
1493 }
1494 
1495 void ClassLoaderMetaspace::print_on(outputStream* out) const {
1496   // Print both class virtual space counts and metaspace.
1497   if (Verbose) {
1498     vsm()->print_on(out);
1499     if (Metaspace::using_class_space()) {
1500       class_vsm()->print_on(out);
1501     }
1502   }
1503 }
1504 
1505 void ClassLoaderMetaspace::verify() {
1506   vsm()->verify();
1507   if (Metaspace::using_class_space()) {
1508     class_vsm()->verify();
1509   }
1510 }
1511 
1512 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
1513   assert_lock_strong(lock());
1514   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
1515   if (Metaspace::using_class_space()) {
1516     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
1517   }
1518 }
1519 
1520 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
1521   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1522   add_to_statistics_locked(out);
1523 }
1524 
1525 /////////////// Unit tests ///////////////
1526 
1527 #ifndef PRODUCT
1528 
1529 class TestMetaspaceUtilsTest : AllStatic {
1530  public:
1531   static void test_reserved() {
1532     size_t reserved = MetaspaceUtils::reserved_bytes();
1533 
1534     assert(reserved > 0, "assert");
1535 
1536     size_t committed  = MetaspaceUtils::committed_bytes();
1537     assert(committed <= reserved, "assert");
1538 
1539     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
1540     assert(reserved_metadata > 0, "assert");
1541     assert(reserved_metadata <= reserved, "assert");
1542 
1543     if (UseCompressedClassPointers) {
1544       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
1545       assert(reserved_class > 0, "assert");
1546       assert(reserved_class < reserved, "assert");
1547     }
1548   }
1549 
1550   static void test_committed() {
1551     size_t committed = MetaspaceUtils::committed_bytes();
1552 
1553     assert(committed > 0, "assert");
1554 
1555     size_t reserved  = MetaspaceUtils::reserved_bytes();
1556     assert(committed <= reserved, "assert");
1557 
1558     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
1559     assert(committed_metadata > 0, "assert");
1560     assert(committed_metadata <= committed, "assert");
1561 
1562     if (UseCompressedClassPointers) {
1563       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
1564       assert(committed_class > 0, "assert");
1565       assert(committed_class < committed, "assert");
1566     }
1567   }
1568 
1569   static void test_virtual_space_list_large_chunk() {
1570     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
1571     MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256K), plus one page so that it is
    // _not_ vm_allocation_granularity-aligned on Windows.
1574     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
1575     large_size += (os::vm_page_size()/BytesPerWord);
1576     vs_list->get_new_chunk(large_size, 0);
1577   }
1578 
1579   static void test() {
1580     test_reserved();
1581     test_committed();
1582     test_virtual_space_list_large_chunk();
1583   }
1584 };
1585 
1586 void TestMetaspaceUtils_test() {
1587   TestMetaspaceUtilsTest::test();
1588 }
1589 
1590 class TestVirtualSpaceNodeTest {
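  // Splits words_left into the largest chunks that fit: first medium, then small,
  // then specialized chunks. This mirrors how retiring a VirtualSpaceNode is
  // expected to break up its remaining committed space.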
1591   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
1592                                           size_t& num_small_chunks,
1593                                           size_t& num_specialized_chunks) {
1594     num_medium_chunks = words_left / MediumChunk;
1595     words_left = words_left % MediumChunk;
1596 
1597     num_small_chunks = words_left / SmallChunk;
1598     words_left = words_left % SmallChunk;
1599     // how many specialized chunks can we get?
1600     num_specialized_chunks = words_left / SpecializedChunk;
1601     assert(words_left % SpecializedChunk == 0, "should be nothing left");
1602   }
1603 
1604  public:
1605   static void test() {
1606     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
1607     const size_t vsn_test_size_words = MediumChunk  * 4;
1608     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
1609 
    // The chunk sizes must be multiples of each other, or this will fail
1611     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
1612     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
1613 
1614     { // No committed memory in VSN
1615       ChunkManager cm(false);
1616       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
1617       vsn.initialize();
1618       vsn.retire(&cm);
1619       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
1620     }
1621 
1622     { // All of VSN is committed, half is used by chunks
1623       ChunkManager cm(false);
1624       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
1625       vsn.initialize();
1626       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
1627       vsn.get_chunk_vs(MediumChunk);
1628       vsn.get_chunk_vs(MediumChunk);
1629       vsn.retire(&cm);
1630       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
1631       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
1632     }
1633 
1634     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
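    // E.g., assuming 4 KB pages and 64-bit words: page_chunks = 4 * 4096 / 8 = 2048 words.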
1635     // This doesn't work for systems with vm_page_size >= 16K.
1636     if (page_chunks < MediumChunk) {
1637       // 4 pages of VSN is committed, some is used by chunks
1638       ChunkManager cm(false);
1639       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
1640 
1641       vsn.initialize();
1642       vsn.expand_by(page_chunks, page_chunks);
1643       vsn.get_chunk_vs(SmallChunk);
1644       vsn.get_chunk_vs(SpecializedChunk);
1645       vsn.retire(&cm);
1646 
1647       // committed - used = words left to retire
1648       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
1649 
1650       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
1651       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
1652 
1653       assert(num_medium_chunks == 0, "should not get any medium chunks");
1654       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
1655       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
1656     }
1657 
1658     { // Half of VSN is committed, a humongous chunk is used
1659       ChunkManager cm(false);
1660       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
1661       vsn.initialize();
1662       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
1663       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
1664       vsn.retire(&cm);
1665 
1666       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
1667       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
1668       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
1669 
1670       assert(num_medium_chunks == 0, "should not get any medium chunks");
1671       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
1672       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
1673     }
1674 
1675   }
1676 
1677 #define assert_is_available_positive(word_size) \
1678   assert(vsn.is_available(word_size), \
1679          #word_size ": " PTR_FORMAT " bytes were not available in " \
1680          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
1681          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
1682 
1683 #define assert_is_available_negative(word_size) \
1684   assert(!vsn.is_available(word_size), \
1685          #word_size ": " PTR_FORMAT " bytes should not be available in " \
1686          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
1687          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
1688 
1689   static void test_is_available_positive() {
1690     // Reserve some memory.
1691     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
1692     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
1693 
1694     // Commit some memory.
1695     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
1696     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
1697     assert(expanded, "Failed to commit");
1698 
1699     // Check that is_available accepts the committed size.
1700     assert_is_available_positive(commit_word_size);
1701 
1702     // Check that is_available accepts half the committed size.
1703     size_t expand_word_size = commit_word_size / 2;
1704     assert_is_available_positive(expand_word_size);
1705   }
1706 
1707   static void test_is_available_negative() {
1708     // Reserve some memory.
1709     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
1710     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
1711 
1712     // Commit some memory.
1713     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
1714     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
1715     assert(expanded, "Failed to commit");
1716 
1717     // Check that is_available doesn't accept a too large size.
1718     size_t two_times_commit_word_size = commit_word_size * 2;
1719     assert_is_available_negative(two_times_commit_word_size);
1720   }
1721 
1722   static void test_is_available_overflow() {
1723     // Reserve some memory.
1724     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
1725     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
1726 
1727     // Commit some memory.
1728     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
1729     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
1730     assert(expanded, "Failed to commit");
1731 
1732     // Calculate a size that will overflow the virtual space size.
1733     void* virtual_space_max = (void*)(uintptr_t)-1;
1734     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
1735     size_t overflow_size = bottom_to_max + BytesPerWord;
1736     size_t overflow_word_size = overflow_size / BytesPerWord;
1737 
1738     // Check that is_available can handle the overflow.
1739     assert_is_available_negative(overflow_word_size);
1740   }
1741 
1742   static void test_is_available() {
1743     TestVirtualSpaceNodeTest::test_is_available_positive();
1744     TestVirtualSpaceNodeTest::test_is_available_negative();
1745     TestVirtualSpaceNodeTest::test_is_available_overflow();
1746   }
1747 };
1748 
1749 #endif // !PRODUCT
1750 
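// The structs and functions below are exported hooks, presumably used by external
// (whitebox/gtest) metaspace tests, to query chunk manager contents and chunk
// geometry without exposing the internal types.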
1751 struct chunkmanager_statistics_t {
1752   int num_specialized_chunks;
1753   int num_small_chunks;
1754   int num_medium_chunks;
1755   int num_humongous_chunks;
1756 };
1757 
1758 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
1759   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
1760   ChunkManagerStatistics stat;
1761   chunk_manager->collect_statistics(&stat);
1762   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
1763   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
1764   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
1765   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
1766 }
1767 
1768 struct chunk_geometry_t {
1769   size_t specialized_chunk_word_size;
1770   size_t small_chunk_word_size;
1771   size_t medium_chunk_word_size;
1772 };
1773 
1774 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
1775   if (mdType == Metaspace::NonClassType) {
1776     out->specialized_chunk_word_size = SpecializedChunk;
1777     out->small_chunk_word_size = SmallChunk;
1778     out->medium_chunk_word_size = MediumChunk;
1779   } else {
1780     out->specialized_chunk_word_size = ClassSpecializedChunk;
1781     out->small_chunk_word_size = ClassSmallChunk;
1782     out->medium_chunk_word_size = ClassMediumChunk;
1783   }
1784 }