/*
 * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
#include "memory/metaspace/msChunkHeaderPool.hpp"
#include "memory/metaspace/msChunkManager.hpp"
#include "memory/metaspace/msCommitLimiter.hpp"
#include "memory/metaspace/msCommon.hpp"
#include "memory/metaspace/msContext.hpp"
#include "memory/metaspace/msReport.hpp"
#include "memory/metaspace/msRunningCounters.hpp"
#include "memory/metaspace/msSettings.hpp"
#include "memory/metaspace/msVirtualSpaceList.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"


using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;


size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return Metaspace::is_class_space_allocation(mdtype) ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return Metaspace::is_class_space_allocation(mdtype) ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return Metaspace::is_class_space_allocation(mdtype) ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}



void MetaspaceUtils::print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values) {
  const metaspace::MetaspaceSizesSnapshot meta_values;

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  //   unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.
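  //
  // For illustration, with hypothetical numbers, a line logged below could look like:
  //   Metaspace: 1024K(1088K)->1024K(1088K) NonClass: 896K(960K)->896K(960K) Class: 128K(128K)->128K(128K)
  // i.e. used(committed) before and after the GC for each scope, assuming
  // HEAP_CHANGE_FORMAT expands to the usual "<name>: <used>K(<committed>K)->..." pattern.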

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

// Prints a basic metaspace usage report. Unlike print_report(), it is
// guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// This function will walk the CLDG and lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.

  out->print_cr(" Metaspace       "
                "used "      SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved "  SIZE_FORMAT "K",
                used_bytes()/K,
                committed_bytes()/K,
                reserved_bytes()/K);

  if (Metaspace::using_class_space()) {
    const Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
                  "used "      SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved "  SIZE_FORMAT "K",
                  used_bytes(ct)/K,
                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif

////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the VM operation used to GC.
// Within the VM operation, after the GC, the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the permanent generation, increases were
// bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() is called for MetaspaceGC to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, which
// are used by some GCs to resize the Java heap.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
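//
// For illustration (hypothetical flag values): with MinMetaspaceExpansion = 256K,
// MaxMetaspaceExpansion = 4M and a commit alignment of 64K, a 100K request
// yields a delta of 256K, a 1M request yields 4M, and an 8M request yields
// 8M + 256K (the result is then asserted to be commit-aligned).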
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns the new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}
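
// A minimal caller sketch (illustration only, not actual HotSpot code): expand
// in a loop, retrying lost CAS races until the increment either succeeds or
// the MaxMetaspaceSize limit is reached. requested_bytes is hypothetical.
//
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(requested_bytes);
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL, &can_retry)) {
//     if (!can_retry) {
//       break;  // would exceed MaxMetaspaceSize; a GC or OOME is needed
//     }
//     // else: another thread won the race; re-read the limit and try again
//   }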

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}
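
// Example with hypothetical values: with committed_bytes = 90M,
// capacity_until_GC = 100M and MaxMetaspaceSize = 120M, left_until_GC is 10M
// and left_until_max is 30M, so allowed_expansion() reports 10M worth of
// words; committing more than that first requires raising the GC threshold.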

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink below the initial size (MetaspaceSize).
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);
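  // Worked example (hypothetical values): with MinMetaspaceFreeRatio = 40 and
  // used_after_gc = 60M, maximum_used_percentage is 0.6 and
  // minimum_desired_capacity becomes 60M / 0.6 = 100M; i.e. the HWM is kept
  // high enough that at least 40% of it would be free.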

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM than desired,
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink.
  // We would never want to shrink more than this.
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size.
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
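      // (E.g. a steady excess of 100M would be reduced by 0M, 10M, 40M and
      // then 100M over four successive shrink opportunities.)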
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}



//////  Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != NULL &&
      (using_class_space() ? metaspace::MetaspaceContext::context_class() != NULL : true);
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != NULL) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: " SIZE_FORMAT,
               p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT,
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);

  // This currently does not work, because rs may be the result of a split
  // operation and NMT seems not to be able to handle splits.
  // Will be fixed with JDK-8243535.
  // MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);

}

// Returns true if class space has been set up (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != NULL;
}

// Reserve a range of memory at an address suitable for en/decoding narrow
// Klass pointers (see: CompressedKlassPointers::is_valid_base()).
// The returned address shall both be suitable as a compressed class pointer
//  base, and aligned to Metaspace::reserve_alignment (which is equal to or a
//  multiple of allocation granularity).
// On error, returns an unreserved space.
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size) {

#ifdef AARCH64
  const size_t alignment = Metaspace::reserve_alignment();

  // AArch64: Try to align metaspace so that we can decode a compressed
  // klass with a single MOVK instruction. We can do this iff the
  // compressed class base is a multiple of 4G.
  // Additionally, above 32G, ensure the lower LogKlassAlignmentInBytes bits
  // of the upper 32-bits of the address are zero so we can handle a shift
  // when decoding.
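  //
  // (For example, illustration only: a base of 32G = 0x8_0000_0000 has zero
  // lower 32 bits, and its only nonzero bits lie in bits 32..47, so a single
  // MOVK with a 32-bit shift can materialize the encoding base.)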

  static const struct {
    address from;
    address to;
    size_t increment;
  } search_ranges[] = {
    {  (address)(4*G),   (address)(32*G),   4*G, },
    {  (address)(32*G),  (address)(1024*G), (4 << LogKlassAlignmentInBytes) * G },
    {  NULL, NULL, 0 }
  };

  for (int i = 0; search_ranges[i].from != NULL; i++) {
    address a = search_ranges[i].from;
    assert(CompressedKlassPointers::is_valid_base(a), "Sanity");
    while (a < search_ranges[i].to) {
      ReservedSpace rs(size, alignment,
                       false /*large_pages*/, (char*)a);
      if (rs.is_reserved()) {
        assert(a == (address)rs.base(), "Sanity");
        return rs;
      }
      a += search_ranges[i].increment;
    }
  }

  // Note: on AARCH64, if the code above does not find any good placement, we
  // have no recourse. We return an empty space and the VM will exit.
  return ReservedSpace();
#else
  // Default implementation: Just reserve anywhere.
  return ReservedSpace(size, Metaspace::reserve_alignment(), false, (char*)NULL);
#endif // AARCH64
}

#endif // _LP64


size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from Settings.
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  //  to commit for the Metaspace.
  //  It is just a number; a limit we compare against before committing. It
  //  does not have to be aligned to anything.
  //  It gets used as compare value in class CommitLimiter.
  //  It is set to max_uintx in globals.hpp by default, so by default it does
  //  not limit anything.
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  //  pre-reserve for the compressed class space (if we use class space).
  //  This size has to be aligned to the metaspace reserve alignment (to the
  //  size of a root chunk). It gets aligned up from whatever value the caller
  //  gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  //  very little to do with each other. The notion often encountered:
  //  MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  //  is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  //  in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  //  save on reserved space, and to make ergonomics less confusing.

  // (aligned just for cleanliness:)
  MaxMetaspaceSize = MAX2(align_down(MaxMetaspaceSize, commit_alignment()), commit_alignment());

  if (UseCompressedClassPointers) {
    // Let CCS size not be larger than 80% of MaxMetaspaceSize. Note that this
    // is grossly over-dimensioned for most usage scenarios; the typical ratio of
    // class space : non-class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though, since ccs is only
    // reserved and will be committed on demand only.
    size_t max_ccs_size = MaxMetaspaceSize * 0.8;
    size_t adjusted_ccs_size = MIN2(CompressedClassSpaceSize, max_ccs_size);

    // CCS must be aligned to root chunk size, and be at least the size of one
    //  root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());
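
    // Worked example (hypothetical values): -XX:MaxMetaspaceSize=256M gives
    // max_ccs_size = ~204M; a CompressedClassSpaceSize of 1G would be clamped
    // to that, then aligned up to the root chunk size (assuming the default
    // 4M root chunks), yielding an adjusted size of 208M.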

    // Note: re-adjusting may leave us with a CompressedClassSpaceSize
    //  larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    //  Let's just live with that, it's not a big deal.

    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to " SIZE_FORMAT ".",
                          CompressedClassSpaceSize);
    }
  }

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());

}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we do not preallocate init chunks anymore, is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  // If UseCompressedClassPointers=1, we have two cases:
  // a) if CDS is active (either dump time or runtime), it will create the ccs
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is not active, we will create the ccs on our own. It will be
  //    placed above the java heap, since we assume it has been placed in low
  //    address regions. We may rethink this (see JDK-8244943). Failing that,
  //    it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (DumpSharedSpaces) {
    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
  } else if (UseSharedSpaces) {
    // If any of the archived spaces fails to map, UseSharedSpaces
    // is reset to false.
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
  }

  if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
    vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!UseSharedSpaces && !DumpSharedSpaces, "CDS should be off at this point");

    // case (b)
    ReservedSpace rs;

    // If UseCompressedOops=1, the java heap may have been placed in coops-friendly
    //  territory already (lower address regions), so we attempt to place ccs
    //  right above the java heap.
    // If UseCompressedOops=0, the heap has been placed anywhere - probably in
    //  high memory regions. In that case, try to place ccs at the lowest allowed
    //  mapping address.
    address base = UseCompressedOops ? CompressedOops::end() : (address)HeapBaseMinAddress;
    base = align_up(base, Metaspace::reserve_alignment());

    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());
    if (base != NULL) {
      if (CompressedKlassPointers::is_valid_base(base)) {
        rs = ReservedSpace(size, Metaspace::reserve_alignment(),
                           false /* large */, (char*)base);
      }
    }

    // ...failing that, reserve anywhere, but let the platform do optimized placement:
    if (!rs.is_reserved()) {
      rs = Metaspace::reserve_address_space_for_compressed_classes(size);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: " SIZE_FORMAT " bytes",
                   CompressedClassSpaceSize));
    }

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());
  }

#endif

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

  // We must prevent the very first address of the ccs from being used to store
  // metadata, since that address would translate to a narrow Klass pointer of 0,
  // and the VM does not distinguish between "narrow 0 as in NULL" and "narrow 0
  // as in start of ccs".
  // Before Elastic Metaspace, this could not happen because every Metachunk had
  // a header and therefore nothing could be allocated at offset 0.
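  // (A narrow Klass pointer is computed, roughly, as (address - ccs_base) >> shift;
  // a Klass placed exactly at ccs_base would therefore encode to 0, which is
  // indistinguishable from a NULL narrow pointer.)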
#ifdef _LP64
  if (using_class_space()) {
    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
    // start of ccs and do not use it for anything.
    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
  }
#endif

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif

}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

size_t Metaspace::max_allocation_word_size() {
  const size_t max_overhead_words = metaspace::get_raw_word_size_for_requested_word_size(1);
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE - max_overhead_words;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (" SIZE_FORMAT ")", word_size);
  assert(!_frozen, "sanity");
  assert(!(DumpSharedSpaces && THREAD->is_VM_thread()), "sanity");

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    if (DumpSharedSpaces) {
      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
      // We should abort to avoid generating a potentially bad archive.
      vm_exit_during_cds_dumping(err_msg("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
          MetaspaceObj::type_name(type), word_size * BytesPerWord),
        err_msg("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize));
    }
    report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
    assert(HAS_PENDING_EXCEPTION, "sanity");
    return NULL;
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != NULL) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  // TODO: this exception text may be wrong and misleading. This needs more thinking. See JDK-8252189.
  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}

void Metaspace::purge() {
  ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
  if (cm != NULL) {
    cm->purge();
  }
  if (using_class_space()) {
    cm = ChunkManager::chunkmanager_class();
    if (cm != NULL) {
      cm->purge();
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && VirtualSpaceList::vslist_class()->contains((MetaWord*)ptr)) {
    return true;
  }

  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}