/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "aot/aotLoader.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/classLoaderMetaspace.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceEnums.hpp"
#include "memory/metaspace/metaspaceReport.hpp"
#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"


using metaspace::ChunkManager;
using metaspace::ClassLoaderMetaspace;
using metaspace::CommitLimiter;
using metaspace::MetaspaceType;
using metaspace::MetadataType;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;


// Used by MetaspaceCounters
size_t MetaspaceUtils::free_chunks_total_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::free_chunks_words_class() : RunningCounters::free_chunks_words_nonclass();
}

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}



void MetaspaceUtils::print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values) {
  const metaspace::MetaspaceSizesSnapshot meta_values;

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  //   unused parts of committed chunks, etc.)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}
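
// Illustrative usage sketch (not called from here; assumes the usual GC pause pattern of
// capturing a snapshot before the collection and printing the delta afterwards):
//
//   const metaspace::MetaspaceSizesSnapshot pre_gc_values;  // captures used/committed now
//   // ... perform the collection ...
//   MetaspaceUtils::print_metaspace_change(pre_gc_values);  // logs before->after values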


// Prints an ASCII representation of the given space.
void MetaspaceUtils::print_metaspace_map(outputStream* out, MetadataType mdtype) {
  out->print_cr("-- not yet implemented ---");
}

// This will print out a basic metaspace usage report but
// unlike print_full_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_full_report(outputStream* out, size_t scale) {
  const int flags =
      MetaspaceReporter::rf_show_loaders |
      MetaspaceReporter::rf_break_down_by_chunktype |
      MetaspaceReporter::rf_show_classes;
  MetaspaceReporter::print_report(out, scale, flags);
}
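
// Illustrative usage sketch (stream and scale are example values; a scale of K prints
// sizes in KB, while report_metadata_oome() below passes 0 to let the reporter choose):
//
//   MetaspaceUtils::print_basic_report(tty, K);   // no locking, safe from OOM paths
//   MetaspaceUtils::print_full_report(tty, K);    // walks the CLDG, takes the expand lock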

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.

  out->print_cr(" Metaspace       "
                "used "      SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved "  SIZE_FORMAT "K",
                used_bytes()/K,
                committed_bytes()/K,
                reserved_bytes()/K);

  if (Metaspace::using_class_space()) {
    const MetadataType ct = metaspace::ClassType;
    out->print_cr("  class space    "
                  "used "      SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved "  SIZE_FORMAT "K",
                  used_bytes(ct)/K,
                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify(bool slow) {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify(slow);

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify(slow);

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      assert(cm != NULL, "Sanity");
      cm->verify(slow);

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      assert(vsl != NULL, "Sanity");
      vsl->verify(slow);
    }

  }
}
#endif

////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the permanent generation, increases were
// bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, which
// some GCs use to resize the Java heap.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
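
// Worked example (illustrative only; the concrete numbers assume a commit alignment of
// 64K and the default MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M):
// - a request of 10K aligns up to 64K, which is below the minimum step, so delta = 256K
// - a request of 1M lies between the two steps, so delta = 4M
// - a request of 10M exceeds the maximum step, so delta = 10M + 256K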

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}
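
// Caller-side sketch (illustrative only; requested_bytes is a placeholder). It mirrors
// how allocation paths typically drive this CAS-based API: retry while another thread
// won the race, give up once can_retry reports the MaxMetaspaceSize limit is the problem.
//
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(requested_bytes);
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL, &can_retry)) {
//     if (!can_retry) {
//       break;  // hard limit reached - trigger a GC / report OOM instead of expanding
//     }
//   }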

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}
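
// Worked example (illustrative numbers only): with MaxMetaspaceSize = 256M and 255M
// already committed, a non-class request of 2M worth of words fails the second check and
// can_expand() returns false; the caller then has to rely on a GC freeing metadata
// rather than on further expansion.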

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}
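
// Worked example (illustrative numbers only): with MaxMetaspaceSize = 256M, 100M
// committed and a GC threshold (capacity_until_GC) of 120M, left_until_max is 156M and
// left_until_GC is 20M, so allowed_expansion() reports 20M worth of words - here the GC
// threshold, not the hard limit, is what constrains further committing.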

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}
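
// Worked example (illustrative numbers only; assumes MinMetaspaceFreeRatio = 40,
// MaxMetaspaceFreeRatio = 70 and 60M committed after the GC):
// - minimum_desired_capacity = 60M / (1 - 0.40) = 100M; a threshold below that expands.
// - maximum_desired_capacity = 60M / (1 - 0.70) = 200M; a threshold above that is a
//   shrink candidate, but the excess is damped by _shrink_factor, which grows
//   0% -> 10% -> 40% -> 100% over successive calls that keep requesting a shrink.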



////// Metaspace methods //////



MetaWord* Metaspace::_compressed_class_space_base = NULL;
size_t Metaspace::_compressed_class_space_size = 0;
const MetaspaceTracer* Metaspace::_tracer = NULL;
bool Metaspace::_initialized = false;
size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

DEBUG_ONLY(bool Metaspace::_frozen = false;)


#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
  address higher_address;
#if INCLUDE_CDS
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                          (address)(metaspace_base + compressed_class_space_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else
#endif
  {
    higher_address = metaspace_base + compressed_class_space_size();
    lower_base = metaspace_base;

    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
    // If compressed class space fits in lower 32G, we don't need a base.
    if (higher_address <= (address)klass_encoding_max) {
      lower_base = 0; // Effectively lower base is zero.
    }
  }

  CompressedKlassPointers::set_base(lower_base);

  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how dump time narrow_klass_shift is set. Although CDS can work
  // with zero-shift mode as well, it uses LogKlassAlignmentInBytes for the
  // klass shift to be consistent with AOT, so archived java heap objects
  // can be used at the same time as AOT code.
  if (!UseSharedSpaces
      && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    CompressedKlassPointers::set_shift(0);
  } else {
    CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
  }
  AOTLoader::set_narrow_klass_shift();
}
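
// Decoding sketch (for orientation only; the authoritative logic lives in
// CompressedKlassPointers): with base B and shift S chosen above, a narrow klass id nk
// maps back to a Klass* roughly as
//
//   Klass* k = (Klass*)(B + ((uintptr_t)nk << S));
//
// which is why a zero base (and, if possible, a zero shift) gives the cheapest decode.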

#if INCLUDE_CDS
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                                (address)(metaspace_base + compressed_class_space_size()));
  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
#endif

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(!DumpSharedSpaces, "compressed klass space is allocated by MetaspaceShared class.");
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
  assert_is_aligned(requested_addr, _reserve_alignment);
  assert_is_aligned(cds_base, _reserve_alignment);
  assert_is_aligned(compressed_class_space_size(), _reserve_alignment);

  // Don't use large pages for the class space.
  bool large_pages = false;

#if !(defined(AARCH64) || defined(AIX))
  ReservedSpace rs = ReservedSpace(compressed_class_space_size(),
                                             _reserve_alignment,
                                             large_pages,
                                             requested_addr);
#else // AARCH64 || AIX
  ReservedSpace rs;

  // Our compressed klass pointers may fit nicely into the lower 32
  // bits.
  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
    rs = ReservedSpace(compressed_class_space_size(),
                                 _reserve_alignment,
                                 large_pages,
                                 requested_addr);
  }

  if (! rs.is_reserved()) {
    // Aarch64: Try to align metaspace so that we can decode a compressed
    // klass with a single MOVK instruction.  We can do this iff the
    // compressed class base is a multiple of 4G.
    // Aix: Search for a place where we can find memory. If we need to load
    // the base, 4G alignment is helpful, too.
    size_t increment = AARCH64_ONLY(4*)G;
    for (char *a = align_up(requested_addr, increment);
         a < (char*)(1024*G);
         a += increment) {
      if (a == (char *)(32*G)) {
        // Go faster from here on. Zero-based is no longer possible.
        increment = 4*G;
      }

#if INCLUDE_CDS
      if (UseSharedSpaces
          && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
        // We failed to find an aligned base that will reach.  Fall
        // back to using our requested addr.
        rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment,
                                     large_pages,
                                     requested_addr);
        break;
      }
#endif

      rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment,
                                   large_pages,
                                   a);
      if (rs.is_reserved())
        break;
    }
  }

#endif // AARCH64 || AIX

  if (!rs.is_reserved()) {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      size_t increment = align_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
        rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr);
      }
    }
#endif
    // If no successful allocation then try to allocate the space anywhere.  If
    // that fails then OOM doom.  At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!rs.is_reserved()) {
      rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment, large_pages);
      if (!rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);

  _compressed_class_space_base = (MetaWord*)rs.base();

#if INCLUDE_CDS
  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }
#endif
  set_narrow_klass_base_and_shift((address)rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(rs);

  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    print_compressed_class_space(&ls, requested_addr);
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {

  // The reserved space size may be bigger because of alignment, esp. with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " < " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  VirtualSpaceList* vsl = new VirtualSpaceList("class space list", rs, CommitLimiter::globalLimiter());
  VirtualSpaceList::set_vslist_class(vsl);
  ChunkManager* cm = new ChunkManager("class space chunk manager", vsl);
  ChunkManager::set_chunkmanager_class(cm);

}


void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
  if (Metaspace::using_class_space()) {
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
                 compressed_class_space_size(), p2i(compressed_class_space_base()));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

#endif

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  // Commit alignment: an implementation detail we would rather hide, but it is needed
  // when calculating the GC threshold.
  _commit_alignment  = metaspace::constants::commit_granule_bytes;

  // Reserve alignment: all Metaspace memory mappings are to be aligned to the size of a root chunk.
  _reserve_alignment = MAX2(page_size, (size_t)metaspace::chklvl::MAX_CHUNK_BYTE_SIZE);

  assert(is_aligned(_reserve_alignment, os::vm_allocation_granularity()),
         "root chunk size must be a multiple of alloc granularity");

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would clobber
  // the record of whether MaxMetaspaceSize was set on the command line or not.
  // This information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);

  // Note: InitialBootClassLoaderMetaspaceSize is an old parameter which is used to determine the chunk size
  // of the first non-class chunk handed to the boot class loader. See metaspace/chunkAllocSequence.hpp.
  size_t min_metaspace_sz = align_up(InitialBootClassLoaderMetaspaceSize, _reserve_alignment);
  if (UseCompressedClassPointers) {
    if (min_metaspace_sz >= MaxMetaspaceSize) {
      vm_exit_during_initialization("MaxMetaspaceSize is too small.");
    } else if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, MaxMetaspaceSize - min_metaspace_sz);
    }
  } else if (min_metaspace_sz >= MaxMetaspaceSize) {
    FLAG_SET_ERGO(InitialBootClassLoaderMetaspaceSize,
                  min_metaspace_sz);
  }

  _compressed_class_space_size = CompressedClassSpaceSize;
}
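
// Worked example (illustrative only; the real values come from
// metaspace::constants::commit_granule_bytes and metaspace::chklvl::MAX_CHUNK_BYTE_SIZE):
// assuming a 64K commit granule and a 4M root chunk on a 4K-page system,
// -XX:MaxMetaspaceSize=130M aligns down to 128M and -XX:MetaspaceSize=21M stays at 21M
// (already 64K-aligned), so the initial GC threshold is 21M under a 128M hard cap.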

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we no longer preallocate initial chunks, is this still needed?

#if INCLUDE_CDS
  if (DumpSharedSpaces) {
    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
  } else if (UseSharedSpaces) {
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false. Fall through to the
    // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
    // metaspace.
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
  }

  if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
    vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
  }
#endif // INCLUDE_CDS

  // Initialize class space:
  if (CDS_ONLY(!DumpSharedSpaces && !UseSharedSpaces) NOT_CDS(true)) {
#ifdef _LP64
    if (using_class_space()) {
      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64
  }

  // Initialize non-class virtual space list, and its chunk manager:
  VirtualSpaceList* vsl = new VirtualSpaceList("Non-Class VirtualSpaceList", CommitLimiter::globalLimiter());
  VirtualSpaceList::set_vslist_nonclass(vsl);
  ChunkManager* cm = new ChunkManager("Non-Class ChunkManager", vsl);
  ChunkManager::set_chunkmanager_nonclass(cm);

  _tracer = new MetaspaceTracer();

  _initialized = true;

}

void Metaspace::post_initialize() {
  // TODO do we really need this? Metaspace should not be allocated during VM initialization.
  assert(RunningCounters::committed_words() == 0, "no metaspace should have been committed yet");
  MetaspaceGC::post_initialize();
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {
  assert(!_frozen, "sanity");
  assert(!(DumpSharedSpaces && THREAD->is_VM_thread()), "sanity");

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? metaspace::ClassType : metaspace::NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    if (DumpSharedSpaces) {
      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
      // We should abort to avoid generating a potentially bad archive.
      vm_exit_during_cds_dumping(err_msg("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
          MetaspaceObj::type_name(type), word_size * BytesPerWord),
        err_msg("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize));
    }
    report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
    assert(HAS_PENDING_EXCEPTION, "sanity");
    return NULL;
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  return result;
}
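
// Caller-side sketch (illustrative only; the usual callers are the MetaspaceObj/Metadata
// operator new implementations): allocate word_size words for a class-space object on
// behalf of a loader, with the usual TRAPS/CHECK_NULL handling of a metaspace OOM.
//
//   MetaWord* p = Metaspace::allocate(loader_data, word_size,
//                                     MetaspaceObj::ClassType, CHECK_NULL);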

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != NULL) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  // Which limit did we hit? CompressedClassSpaceSize or MaxMetaspaceSize?
  bool out_of_compressed_class_space = false;
  if (is_class(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(metaspace::ClassType) +
      // TODO: Okay this is just cheesy.
      // Of course this may fail and return incorrect results.
      // Think this over - we need some clean way to remember which limit
      // exactly we hit during an allocation. Some sort of allocation context structure?
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

void Metaspace::purge() {
// Todo
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && VirtualSpaceList::vslist_class()->contains((MetaWord*)ptr)) {
     return true;
  }

  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}