1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"
  46 #include "runtime/mutexLocker.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Helper function that does a bunch of checks for a chunk.
  59 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  60 
  61 // Given a Metachunk, update its in-use information (both in the
  62 // chunk and the occupancy map).
  63 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  64 
  65 size_t const allocation_from_dictionary_limit = 4 * K;
  66 
  67 MetaWord* last_allocated = 0;
  68 
  69 size_t Metaspace::_compressed_class_space_size;
  70 const MetaspaceTracer* Metaspace::_tracer = NULL;
  71 
  72 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  73 
  74 // Internal statistics.
  75 #ifdef ASSERT
  76 struct {
  77   // Number of allocations (from outside)
  78   uintx num_allocs;
  79   // Number of times a ClassLoaderMetaspace was born.
  80   uintx num_metaspace_births;
  81   // Number of times a ClassLoaderMetaspace died.
  82   uintx num_metaspace_deaths;
  83   // Number of times VirtualSpaceListNodes were created...
  84   uintx num_vsnodes_created;
  85   // ... and purged.
  86   uintx num_vsnodes_purged;
  87   // Number of times we expanded the committed section of the space.
  88   uintx num_committed_space_expanded;
  89   // Number of deallocations (e.g. retransformClasses etc)
  90   uintx num_deallocs;
  91   // Number of times an alloc was satisfied from deallocated blocks.
  92   uintx num_allocs_from_deallocated_blocks;
  93 } g_internal_statistics;
  94 #endif
  95 
  96 enum ChunkSizes {    // in words.
  97   ClassSpecializedChunk = 128,
  98   SpecializedChunk = 128,
  99   ClassSmallChunk = 256,
 100   SmallChunk = 512,
 101   ClassMediumChunk = 4 * K,
 102   MediumChunk = 8 * K
 103 };
 104 
 105 // Returns size of this chunk type.
 106 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
 107   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
 108   size_t size = 0;
 109   if (is_class) {
 110     switch(chunktype) {
 111       case SpecializedIndex: size = ClassSpecializedChunk; break;
 112       case SmallIndex: size = ClassSmallChunk; break;
 113       case MediumIndex: size = ClassMediumChunk; break;
 114       default:
 115         ShouldNotReachHere();
 116     }
 117   } else {
 118     switch(chunktype) {
 119       case SpecializedIndex: size = SpecializedChunk; break;
 120       case SmallIndex: size = SmallChunk; break;
 121       case MediumIndex: size = MediumChunk; break;
 122       default:
 123         ShouldNotReachHere();
 124     }
 125   }
 126   return size;
 127 }
 128 
 129 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 130   if (is_class) {
 131     if (size == ClassSpecializedChunk) {
 132       return SpecializedIndex;
 133     } else if (size == ClassSmallChunk) {
 134       return SmallIndex;
 135     } else if (size == ClassMediumChunk) {
 136       return MediumIndex;
 137     } else if (size > ClassMediumChunk) {
 138       // A valid humongous chunk size is a multiple of the smallest chunk size.
 139       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 140       return HumongousIndex;
 141     }
 142   } else {
 143     if (size == SpecializedChunk) {
 144       return SpecializedIndex;
 145     } else if (size == SmallChunk) {
 146       return SmallIndex;
 147     } else if (size == MediumChunk) {
 148       return MediumIndex;
 149     } else if (size > MediumChunk) {
 150       // A valid humongous chunk size is a multiple of the smallest chunk size.
 151       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 152       return HumongousIndex;
 153     }
 154   }
 155   ShouldNotReachHere();
 156   return (ChunkIndex)-1;
 157 }
 158 
 159 
 160 static ChunkIndex next_chunk_index(ChunkIndex i) {
 161   assert(i < NumberOfInUseLists, "Out of bound");
 162   return (ChunkIndex) (i+1);
 163 }
 164 
 165 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 166   assert(i > ZeroIndex, "Out of bound");
 167   return (ChunkIndex) (i-1);
 168 }
 169 
 170 static const char* space_type_name(Metaspace::MetaspaceType t) {
 171   const char* s = NULL;
 172   switch (t) {
 173   case Metaspace::StandardMetaspaceType: s = "Standard"; break;
 174   case Metaspace::BootMetaspaceType: s = "Boot"; break;
 175   case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
 176   case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
 177   }
 178   assert(s != NULL, "Invalid space type");
 179   return s;
 180 }
 181 
 182 static const char* scale_unit(size_t scale) {
 183   switch(scale) {
 184     case 1: return "bytes";
 185     case sizeof(MetaWord): return "words";
 186     case K: return "KB";
 187     case M: return "MB";
 188     case G: return "GB";
 189     default:
 190       ShouldNotReachHere();
 191       return NULL;
 192   }
 193 }
 194 
 195 // Print a size, in bytes, scaled.
 196 static void print_scaled_bytes(outputStream* st, size_t byte_size, size_t scale = 0, int width = -1) {
 197   if (scale == 0) {
 198     // Dynamic mode. Choose scale for this value.
 199     if (byte_size == 0) {
 200       // Zero values are printed as bytes.
 201       scale = 1;
 202     } else {
 203       if (byte_size >= G) {
 204         scale = G;
 205       } else if (byte_size >= M) {
 206         scale = M;
 207       } else if (byte_size >= K) {
 208         scale = K;
 209       } else {
 210         scale = 1;
 211       }
 212     }
 213     return print_scaled_bytes(st, byte_size, scale, width);
 214   }
 215 
 216 #ifdef ASSERT
 217   assert(scale == 1 || scale == sizeof(MetaWord) || scale == K || scale == M || scale == G, "Invalid scale");
 218   // Special case: printing wordsize should only be done with word-sized values
 219   if (scale == sizeof(MetaWord)) {
 220     assert(byte_size % sizeof(MetaWord) == 0, "not word sized");
 221   }
 222 #endif
 223 
 224   if (scale == 1) {
 225     st->print("%*" PRIuPTR " bytes", width, byte_size);
 226   } else if (scale == sizeof(MetaWord)) {
 227     st->print("%*" PRIuPTR " words", width, byte_size / sizeof(MetaWord));
 228   } else {
 229     const char* const unit = scale_unit(scale);
 230     float display_value = (float) byte_size / scale;
 231     // Since we use width to display a number with two trailing digits, increase it a bit.
 232     width += 3;
 233     // Prevent very small but non-null values showing up as 0.00.
 234     if (byte_size > 0 && display_value < 0.01f) {
 235       st->print("%*s %s", width, "<0.01", unit);
 236     } else {
 237       st->print("%*.2f %s", width, display_value, unit);
 238     }
 239   }
 240 }
 241 
 242 // Print a size, in words, scaled.
 243 static void print_scaled_words(outputStream* st, size_t word_size, size_t scale = 0, int width = -1) {
 244   print_scaled_bytes(st, word_size * sizeof(MetaWord), scale, width);
 245 }
 246 
 247 static void print_percentage(outputStream* st, size_t total, size_t part) {
 248   if (total == 0) {
 249     st->print("  ?%%");
 250   } else if (part == 0) {
 251     st->print("  0%%");
 252   } else {
 253     float p = ((float)part / total) * 100.0f;
 254     if (p < 1.0f) {
 255       st->print(" <1%%");
 256     } else {
 257       st->print("%3.0f%%", p);
 258     }
 259   }
 260 }
 261 
 262 // Convenience helper: prints a size value and a percentage.
 263 static void print_scaled_words_and_percentage(outputStream* st, size_t word_size, size_t compare_word_size, size_t scale = 0, int width = -1) {
 264   print_scaled_words(st, word_size, scale, width);
 265   st->print(" (");
 266   print_percentage(st, compare_word_size, word_size);
 267   st->print(")");
 268 }
 269 
 270 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 271 uint MetaspaceGC::_shrink_factor = 0;
 272 bool MetaspaceGC::_should_concurrent_collect = false;
 273 
 274 /// statistics ///////
 275 
 276 // Contains statistics for a number of free chunks.
 277 class FreeChunksStatistics {
 278   uintx _num;         // Number of chunks
 279   size_t _cap;        // Total capacity, in words
 280 
 281 public:
 282   FreeChunksStatistics() : _num(0), _cap(0) {}
 283 
 284   void reset() {
 285     _num = 0; _cap = 0;
 286   }
 287 
 288   uintx num() const { return _num; }
 289   size_t cap() const { return _cap; }
 290 
 291   void add(uintx n, size_t s) { _num += n; _cap += s; }
 292   void add(const FreeChunksStatistics& other) {
 293     _num += other._num;
 294     _cap += other._cap;
 295   }
 296 
 297   void print_on(outputStream* st, size_t scale) const {
 298     st->print(UINTX_FORMAT, _num);
 299     st->print(" chunks, total capacity ");
 300     print_scaled_words(st, _cap, scale);
 301   }
 302 
 303 }; // end: FreeChunksStatistics
 304 
 305 // Contains statistics for a ChunkManager.
 306 class ChunkManagerStatistics {
 307 
 308   FreeChunksStatistics _chunk_stats[NumberOfInUseLists];
 309 
 310 public:
 311 
 312   // Free chunk statistics, by chunk index.
 313   const FreeChunksStatistics& chunk_stats(ChunkIndex index) const   { return _chunk_stats[index]; }
 314   FreeChunksStatistics& chunk_stats(ChunkIndex index)               { return _chunk_stats[index]; }
 315 
 316   void reset() {
 317     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 318       _chunk_stats[i].reset();
 319     }
 320   }
 321 
 322   size_t total_capacity() const {
 323     return _chunk_stats[SpecializedIndex].cap() +
 324         _chunk_stats[SmallIndex].cap() +
 325         _chunk_stats[MediumIndex].cap() +
 326         _chunk_stats[HumongousIndex].cap();
 327   }
 328 
 329   void print_on(outputStream* st, size_t scale) const {
 330     FreeChunksStatistics totals;
 331     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 332       st->cr();
 333       st->print("%12s chunks: ", chunk_size_name(i));
 334       if (_chunk_stats[i].num() > 0) {
 335         st->print(UINTX_FORMAT_W(4) ", capacity ", _chunk_stats[i].num());
 336         print_scaled_words(st, _chunk_stats[i].cap(), scale);
 337       } else {
 338         st->print("(none)");
 339       }
 340       totals.add(_chunk_stats[i]);
 341     }
 342     st->cr();
 343     st->print("%19s: " UINTX_FORMAT_W(4) ", capacity=", "Total", totals.num());
 344     print_scaled_words(st, totals.cap(), scale);
 345     st->cr();
 346   }
 347 
 348 }; // ChunkManagerStatistics
 349 
 350 // Contains statistics for a number of chunks in use.
 351 // Each chunk has a used and free portion; however, there are current chunks (serving
 352 // potential future metaspace allocations) and non-current chunks. Unused portion of the
 353 // former is counted as free, unused portion of the latter counts as waste.
 354 class UsedChunksStatistics {
 355   uintx _num;     // Number of chunks
 356   size_t _cap;    // Total capacity, in words
 357   size_t _used;   // Total used area, in words
 358   size_t _free;   // Total free area (unused portions of current chunks), in words
 359   size_t _waste;  // Total waste area (unused portions of non-current chunks), in words
 360 
 361 public:
 362 
 363   UsedChunksStatistics()
 364     : _num(0), _cap(0), _used(0), _free(0), _waste(0)
 365   {}
 366 
 367   void reset() {
 368     _num = 0;
 369     _cap = _used = _free = _waste = 0;
 370   }
 371 
 372   uintx num() const { return _num; }
 373 
 374   // Total capacity, in words
 375   size_t cap() const { return _cap; }
 376 
 377   // Total used area, in words
 378   size_t used() const { return _used; }
 379 
 380   // Total free area (unused portions of current chunks), in words
 381   size_t free() const { return _free; }
 382 
 383   // Total waste area (unused portions of non-current chunks), in words
 384   size_t waste() const { return _waste; }
 385 
 386   void add_num(uintx n) { _num += n; }
 387   void add_cap(size_t s) { _cap += s; }
 388   void add_used(size_t s) { _used += s; }
 389   void add_free(size_t s) { _free += s; }
 390   void add_waste(size_t s) { _waste += s; }
 391 
 392   void add(const UsedChunksStatistics& other) {
 393     _num += other._num;
 394     _cap += other._cap;
 395     _used += other._used;
 396     _free += other._free;
 397     _waste += other._waste;
 398   }
 399 
 400   void print_on(outputStream* st, size_t scale) const {
 401     int col = st->position();
 402     st->print(UINTX_FORMAT_W(3) " chunk%s, ", _num, _num != 1 ? "s" : "");
 403     if (_num > 0) {
 404       col += 12; st->fill_to(col);
 405 
 406       print_scaled_words(st, _cap, scale, 5);
 407       st->print(" capacity, ");
 408 
 409       col += 18; st->fill_to(col);
 410       print_scaled_words_and_percentage(st, _used, _cap, scale, 5);
 411       st->print(" used, ");
 412 
 413       col += 20; st->fill_to(col);
 414       print_scaled_words_and_percentage(st, _free, _cap, scale, 5);
 415       st->print(" free, ");
 416 
 417       col += 20; st->fill_to(col);
 418       print_scaled_words_and_percentage(st, _waste, _cap, scale, 5);
 419       st->print(" waste");
 420     }
 421   }
 422 
 423 }; // UsedChunksStatistics
 424 
 425 // Class containing statistics for one or more space managers.
 426 class SpaceManagerStatistics {
 427 
 428   UsedChunksStatistics _chunk_stats[NumberOfInUseLists];
 429   uintx _free_blocks_num;
 430   size_t _free_blocks_cap_words;
 431 
 432 public:
 433 
 434   SpaceManagerStatistics() { reset(); }
 435 
 436   void reset() {
 437     for (int i = 0; i < NumberOfInUseLists; i ++) {
 438       _chunk_stats[i].reset();
 439       _free_blocks_num = 0; _free_blocks_cap_words = 0;
 440     }
 441   }
 442 
 443   void add_free_blocks(uintx num, size_t cap) {
 444     _free_blocks_num += num;
 445     _free_blocks_cap_words += cap;
 446   }
 447 
 448   // Chunk statistics by chunk index
 449   const UsedChunksStatistics& chunk_stats(ChunkIndex index) const   { return _chunk_stats[index]; }
 450   UsedChunksStatistics& chunk_stats(ChunkIndex index)               { return _chunk_stats[index]; }
 451 
 452   uintx free_blocks_num () const { return _free_blocks_num; }
 453   size_t free_blocks_cap_words () const { return _free_blocks_cap_words; }
 454 
 455   // Returns total chunk statistics over all chunk types.
 456   UsedChunksStatistics totals() const {
 457     UsedChunksStatistics stat;
 458     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 459       stat.add(_chunk_stats[i]);
 460     }
 461     return stat;
 462   }
 463 
 464   void add(const SpaceManagerStatistics& other) {
 465     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 466       _chunk_stats[i].add(other._chunk_stats[i]);
 467     }
 468     _free_blocks_num += other._free_blocks_num;
 469     _free_blocks_cap_words += other._free_blocks_cap_words;
 470   }
 471 
 472   void print_on(outputStream* st, size_t scale,  bool detailed) const {
 473     UsedChunksStatistics totals;
 474     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 475       totals.add(_chunk_stats[i]);
 476     }
 477     streamIndentor sti(st);
 478     if (detailed) {
 479       st->cr_indent();
 480     }
 481     totals.print_on(st, scale);
 482     if (_free_blocks_num > 0) {
 483       if (detailed) {
 484         st->cr_indent();
 485       } else {
 486         st->print(", ");
 487       }
 488       st->print("deallocated: " UINTX_FORMAT " blocks with ", _free_blocks_num);
 489       print_scaled_words(st, _free_blocks_cap_words, scale);
 490     }
 491     if (detailed) {
 492       st->cr_indent();
 493       st->print("By chunk type:");
 494       {
 495         streamIndentor sti2(st);
 496         for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 497           st->cr_indent();
 498           st->print("%15s: ", chunk_size_name(i));
 499           if (_chunk_stats[i].num() == 0) {
 500             st->print(" (none)");
 501           } else {
 502             _chunk_stats[i].print_on(st, scale);
 503           }
 504         }
 505       }
 506     }
 507   }
 508 
 509 }; // SpaceManagerStatistics
 510 
 511 class ClassLoaderMetaspaceStatistics {
 512 
 513   SpaceManagerStatistics _sm_stats[Metaspace::MetadataTypeCount];
 514 
 515 public:
 516 
 517   ClassLoaderMetaspaceStatistics() { reset(); }
 518 
 519   void reset() {
 520     nonclass_sm_stats().reset();
 521     if (Metaspace::using_class_space()) {
 522       class_sm_stats().reset();
 523     }
 524   }
 525 
 526   const SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType) const { return _sm_stats[mdType]; }
 527   SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType)             { return _sm_stats[mdType]; }
 528 
 529   const SpaceManagerStatistics& nonclass_sm_stats() const { return sm_stats(Metaspace::NonClassType); }
 530   SpaceManagerStatistics& nonclass_sm_stats()             { return sm_stats(Metaspace::NonClassType); }
 531   const SpaceManagerStatistics& class_sm_stats() const    { return sm_stats(Metaspace::ClassType); }
 532   SpaceManagerStatistics& class_sm_stats()                { return sm_stats(Metaspace::ClassType); }
 533 
 534   // Returns total space manager statistics for both class and non-class metaspace
 535   SpaceManagerStatistics totals() const {
 536     SpaceManagerStatistics stats;
 537     stats.add(nonclass_sm_stats());
 538     if (Metaspace::using_class_space()) {
 539       stats.add(class_sm_stats());
 540     }
 541     return stats;
 542   }
 543 
 544   void add(const ClassLoaderMetaspaceStatistics& other) {
 545     nonclass_sm_stats().add(other.nonclass_sm_stats());
 546     if (Metaspace::using_class_space()) {
 547       class_sm_stats().add(other.class_sm_stats());
 548     }
 549   }
 550 
 551   void print_on(outputStream* st, size_t scale, bool detailed) const {
 552     streamIndentor sti(st);
 553     st->cr_indent();
 554     if (Metaspace::using_class_space()) {
 555       st->print("Non-Class: ");
 556     }
 557     nonclass_sm_stats().print_on(st, scale, detailed);
 558     if (Metaspace::using_class_space()) {
 559       st->cr_indent();
 560       st->print("Class:     ");
 561       class_sm_stats().print_on(st, scale, detailed);
 562     }
 563     st->cr();
 564   }
 565 
 566 }; // ClassLoaderMetaspaceStatistics
 567 
 568 
 569 typedef class FreeList<Metachunk> ChunkList;
 570 
 571 // Manages the global free lists of chunks.
 572 class ChunkManager : public CHeapObj<mtInternal> {
 573   friend class TestVirtualSpaceNodeTest;
 574 
 575   // Free list of chunks of different sizes.
 576   //   SpecializedChunk
 577   //   SmallChunk
 578   //   MediumChunk
 579   ChunkList _free_chunks[NumberOfFreeLists];
 580 
 581   // Whether or not this is the class chunkmanager.
 582   const bool _is_class;
 583 
 584   // Return non-humongous chunk list by its index.
 585   ChunkList* free_chunks(ChunkIndex index);
 586 
 587   // Returns non-humongous chunk list for the given chunk word size.
 588   ChunkList* find_free_chunks_list(size_t word_size);
 589 
 590   //   HumongousChunk
 591   ChunkTreeDictionary _humongous_dictionary;
 592 
 593   // Returns the humongous chunk dictionary.
 594   ChunkTreeDictionary* humongous_dictionary() {
 595     return &_humongous_dictionary;
 596   }
 597 
 598   // Size, in metaspace words, of all chunks managed by this ChunkManager
 599   size_t _free_chunks_total;
 600   // Number of chunks in this ChunkManager
 601   size_t _free_chunks_count;
 602 
 603   // Update counters after a chunk was added or removed removed.
 604   void account_for_added_chunk(const Metachunk* c);
 605   void account_for_removed_chunk(const Metachunk* c);
 606 
 607   // Debug support
 608 
 609   size_t sum_free_chunks();
 610   size_t sum_free_chunks_count();
 611 
 612   void locked_verify_free_chunks_total();
 613   void slow_locked_verify_free_chunks_total() {
 614     if (VerifyMetaspace) {
 615       locked_verify_free_chunks_total();
 616     }
 617   }
 618   void locked_verify_free_chunks_count();
 619   void slow_locked_verify_free_chunks_count() {
 620     if (VerifyMetaspace) {
 621       locked_verify_free_chunks_count();
 622     }
 623   }
 624   void verify_free_chunks_count();
 625 
 626   // Given a pointer to a chunk, attempts to merge it with neighboring
 627   // free chunks to form a bigger chunk. Returns true if successful.
 628   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 629 
 630   // Helper for chunk merging:
 631   //  Given an address range with 1-n chunks which are all supposed to be
 632   //  free and hence currently managed by this ChunkManager, remove them
 633   //  from this ChunkManager and mark them as invalid.
 634   // - This does not correct the occupancy map.
 635   // - This does not adjust the counters in ChunkManager.
 636   // - Does not adjust container count counter in containing VirtualSpaceNode.
 637   // Returns number of chunks removed.
 638   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 639 
 640   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 641   // split up the larger chunk into n smaller chunks, at least one of which should be
 642   // the target chunk of target chunk size. The smaller chunks, including the target
 643   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 644   // Note that this chunk is supposed to be removed from the freelist right away.
 645   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 646 
 647  public:
 648 
 649   ChunkManager(bool is_class)
 650       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 651     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 652     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 653     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 654   }
 655 
 656   // Add or delete (return) a chunk to the global freelist.
 657   Metachunk* chunk_freelist_allocate(size_t word_size);
 658 
 659   // Map a size to a list index assuming that there are lists
 660   // for special, small, medium, and humongous chunks.
 661   ChunkIndex list_index(size_t size);
 662 
 663   // Map a given index to the chunk size.
 664   size_t size_by_index(ChunkIndex index) const;
 665 
 666   bool is_class() const { return _is_class; }
 667 
 668   // Convenience accessors.
 669   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 670   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 671   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 672 
 673   // Take a chunk from the ChunkManager. The chunk is expected to be in
 674   // the chunk manager (the freelist if non-humongous, the dictionary if
 675   // humongous).
 676   void remove_chunk(Metachunk* chunk);
 677 
 678   // Return a single chunk of type index to the ChunkManager.
 679   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 680 
 681   // Add the simple linked list of chunks to the freelist of chunks
 682   // of type index.
 683   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 684 
 685   // Total of the space in the free chunks list
 686   size_t free_chunks_total_words();
 687   size_t free_chunks_total_bytes();
 688 
 689   // Number of chunks in the free chunks list
 690   size_t free_chunks_count();
 691 
 692   // Remove from a list by size.  Selects list based on size of chunk.
 693   Metachunk* free_chunks_get(size_t chunk_word_size);
 694 
 695 #define index_bounds_check(index)                                         \
 696   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 697 
 698   size_t num_free_chunks(ChunkIndex index) const {
 699     index_bounds_check(index);
 700 
 701     if (index == HumongousIndex) {
 702       return _humongous_dictionary.total_free_blocks();
 703     }
 704 
 705     ssize_t count = _free_chunks[index].count();
 706     return count == -1 ? 0 : (size_t) count;
 707   }
 708 
 709   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 710     index_bounds_check(index);
 711 
 712     size_t word_size = 0;
 713     if (index == HumongousIndex) {
 714       word_size = _humongous_dictionary.total_size();
 715     } else {
 716       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 717       word_size = size_per_chunk_in_words * num_free_chunks(index);
 718     }
 719 
 720     return word_size * BytesPerWord;
 721   }
 722 
 723   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 724     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 725                                          num_free_chunks(SmallIndex),
 726                                          num_free_chunks(MediumIndex),
 727                                          num_free_chunks(HumongousIndex),
 728                                          size_free_chunks_in_bytes(SpecializedIndex),
 729                                          size_free_chunks_in_bytes(SmallIndex),
 730                                          size_free_chunks_in_bytes(MediumIndex),
 731                                          size_free_chunks_in_bytes(HumongousIndex));
 732   }
 733 
 734   // Debug support
 735   void verify();
 736   void slow_verify() {
 737     if (VerifyMetaspace) {
 738       verify();
 739     }
 740   }
 741   void locked_verify();
 742   void slow_locked_verify() {
 743     if (VerifyMetaspace) {
 744       locked_verify();
 745     }
 746   }
 747   void verify_free_chunks_total();
 748 
 749   void locked_print_free_chunks(outputStream* st);
 750   void locked_print_sum_free_chunks(outputStream* st);
 751 
 752   void print_on(outputStream* st) const;
 753 
 754   void get_statistics(ChunkManagerStatistics* out) const;
 755 
 756 };
 757 
 758 class SmallBlocks : public CHeapObj<mtClass> {
 759   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 760   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 761 
 762  private:
 763   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 764 
 765   FreeList<Metablock>& list_at(size_t word_size) {
 766     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 767     return _small_lists[word_size - _small_block_min_size];
 768   }
 769 
 770  public:
 771   SmallBlocks() {
 772     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 773       uint k = i - _small_block_min_size;
 774       _small_lists[k].set_size(i);
 775     }
 776   }
 777 
 778   size_t total_size() const {
 779     size_t result = 0;
 780     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 781       uint k = i - _small_block_min_size;
 782       result = result + _small_lists[k].count() * _small_lists[k].size();
 783     }
 784     return result;
 785   }
 786 
 787   uintx total_num_blocks() const {
 788     uintx result = 0;
 789     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 790       uint k = i - _small_block_min_size;
 791       result = result + _small_lists[k].count();
 792     }
 793     return result;
 794   }
 795 
 796   static uint small_block_max_size() { return _small_block_max_size; }
 797   static uint small_block_min_size() { return _small_block_min_size; }
 798 
 799   MetaWord* get_block(size_t word_size) {
 800     if (list_at(word_size).count() > 0) {
 801       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 802       return new_block;
 803     } else {
 804       return NULL;
 805     }
 806   }
 807   void return_block(Metablock* free_chunk, size_t word_size) {
 808     list_at(word_size).return_chunk_at_head(free_chunk, false);
 809     assert(list_at(word_size).count() > 0, "Should have a chunk");
 810   }
 811 
 812   void print_on(outputStream* st) const {
 813     st->print_cr("SmallBlocks:");
 814     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 815       uint k = i - _small_block_min_size;
 816       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 817     }
 818   }
 819 };
 820 
 821 // Used to manage the free list of Metablocks (a block corresponds
 822 // to the allocation of a quantum of metadata).
 823 class BlockFreelist : public CHeapObj<mtClass> {
 824   BlockTreeDictionary* const _dictionary;
 825   SmallBlocks* _small_blocks;
 826 
 827   // Only allocate and split from freelist if the size of the allocation
 828   // is at least 1/4th the size of the available block.
 829   const static int WasteMultiplier = 4;
 830 
 831   // Accessors
 832   BlockTreeDictionary* dictionary() const { return _dictionary; }
 833   SmallBlocks* small_blocks() {
 834     if (_small_blocks == NULL) {
 835       _small_blocks = new SmallBlocks();
 836     }
 837     return _small_blocks;
 838   }
 839 
 840  public:
 841   BlockFreelist();
 842   ~BlockFreelist();
 843 
 844   // Get and return a block to the free list
 845   MetaWord* get_block(size_t word_size);
 846   void return_block(MetaWord* p, size_t word_size);
 847 
 848   size_t total_size() const  {
 849     size_t result = dictionary()->total_size();
 850     if (_small_blocks != NULL) {
 851       result = result + _small_blocks->total_size();
 852     }
 853     return result;
 854   }
 855 
 856   uintx num_blocks() const {
 857     uintx result = dictionary()->total_free_blocks();
 858     if (_small_blocks != NULL) {
 859       result = result + _small_blocks->total_num_blocks();
 860     }
 861     return result;
 862   }
 863 
 864   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 865   void print_on(outputStream* st) const;
 866 };
 867 
 868 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant.
 869 template <typename T> struct all_ones  { static const T value; };
 870 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 871 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 872 
 873 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 874 // keeps information about
 875 // - where a chunk starts
 876 // - whether a chunk is in-use or free
 877 // A bit in this bitmap represents one range of memory in the smallest
 878 // chunk size (SpecializedChunk or ClassSpecializedChunk).
 879 class OccupancyMap : public CHeapObj<mtInternal> {
 880 
 881   // The address range this map covers.
 882   const MetaWord* const _reference_address;
 883   const size_t _word_size;
 884 
 885   // The word size of a specialized chunk, aka the number of words one
 886   // bit in this map represents.
 887   const size_t _smallest_chunk_word_size;
 888 
 889   // map data
 890   // Data are organized in two bit layers:
 891   // The first layer is the chunk-start-map. Here, a bit is set to mark
 892   // the corresponding region as the head of a chunk.
 893   // The second layer is the in-use-map. Here, a set bit indicates that
 894   // the corresponding belongs to a chunk which is in use.
 895   uint8_t* _map[2];
 896 
 897   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 898 
 899   // length, in bytes, of bitmap data
 900   size_t _map_size;
 901 
 902   // Returns true if bit at position pos at bit-layer layer is set.
 903   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 904     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 905     const unsigned byteoffset = pos / 8;
 906     assert(byteoffset < _map_size,
 907            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 908     const unsigned mask = 1 << (pos % 8);
 909     return (_map[layer][byteoffset] & mask) > 0;
 910   }
 911 
 912   // Changes bit at position pos at bit-layer layer to value v.
 913   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 914     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 915     const unsigned byteoffset = pos / 8;
 916     assert(byteoffset < _map_size,
 917            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 918     const unsigned mask = 1 << (pos % 8);
 919     if (v) {
 920       _map[layer][byteoffset] |= mask;
 921     } else {
 922       _map[layer][byteoffset] &= ~mask;
 923     }
 924   }
 925 
 926   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 927   // pos is 32/64 aligned and num_bits is 32/64.
 928   // This is the typical case when coalescing to medium chunks, whose size is
 929   // 32 or 64 times the specialized chunk size (depending on class or non class
 930   // case), so they occupy 64 bits which should be 64bit aligned, because
 931   // chunks are chunk-size aligned.
 932   template <typename T>
 933   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 934     assert(_map_size > 0, "not initialized");
 935     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 936     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 937     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 938     const size_t byteoffset = pos / 8;
 939     assert(byteoffset <= (_map_size - sizeof(T)),
 940            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 941     const T w = *(T*)(_map[layer] + byteoffset);
 942     return w > 0 ? true : false;
 943   }
 944 
 945   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 946   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 947     if (pos % 32 == 0 && num_bits == 32) {
 948       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 949     } else if (pos % 64 == 0 && num_bits == 64) {
 950       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 951     } else {
 952       for (unsigned n = 0; n < num_bits; n ++) {
 953         if (get_bit_at_position(pos + n, layer)) {
 954           return true;
 955         }
 956       }
 957     }
 958     return false;
 959   }
 960 
 961   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 962   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 963     assert(word_size % _smallest_chunk_word_size == 0,
 964         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 965     const unsigned pos = get_bitpos_for_address(p);
 966     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 967     return is_any_bit_set_in_region(pos, num_bits, layer);
 968   }
 969 
 970   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 971   // pos is 32/64 aligned and num_bits is 32/64.
 972   // This is the typical case when coalescing to medium chunks, whose size
 973   // is 32 or 64 times the specialized chunk size (depending on class or non
 974   // class case), so they occupy 64 bits which should be 64bit aligned,
 975   // because chunks are chunk-size aligned.
 976   template <typename T>
 977   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 978     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 979            (unsigned)(sizeof(T) * 8), pos);
 980     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 981            num_bits, (unsigned)(sizeof(T) * 8));
 982     const size_t byteoffset = pos / 8;
 983     assert(byteoffset <= (_map_size - sizeof(T)),
 984            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 985     T* const pw = (T*)(_map[layer] + byteoffset);
 986     *pw = v ? all_ones<T>::value : (T) 0;
 987   }
 988 
 989   // Set all bits in a region starting at pos to a value.
 990   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 991     assert(_map_size > 0, "not initialized");
 992     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 993     if (pos % 32 == 0 && num_bits == 32) {
 994       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 995     } else if (pos % 64 == 0 && num_bits == 64) {
 996       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 997     } else {
 998       for (unsigned n = 0; n < num_bits; n ++) {
 999         set_bit_at_position(pos + n, layer, v);
1000       }
1001     }
1002   }
1003 
1004   // Helper: sets all bits in a region [p, p+word_size).
1005   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
1006     assert(word_size % _smallest_chunk_word_size == 0,
1007         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
1008     const unsigned pos = get_bitpos_for_address(p);
1009     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
1010     set_bits_of_region(pos, num_bits, layer, v);
1011   }
1012 
1013   // Helper: given an address, return the bit position representing that address.
1014   unsigned get_bitpos_for_address(const MetaWord* p) const {
1015     assert(_reference_address != NULL, "not initialized");
1016     assert(p >= _reference_address && p < _reference_address + _word_size,
1017            "Address %p out of range for occupancy map [%p..%p).",
1018             p, _reference_address, _reference_address + _word_size);
1019     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
1020            "Address not aligned (%p).", p);
1021     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
1022     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
1023     return (unsigned) d;
1024   }
1025 
1026  public:
1027 
1028   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
1029     _reference_address(reference_address), _word_size(word_size),
1030     _smallest_chunk_word_size(smallest_chunk_word_size) {
1031     assert(reference_address != NULL, "invalid reference address");
1032     assert(is_aligned(reference_address, smallest_chunk_word_size),
1033            "Reference address not aligned to smallest chunk size.");
1034     assert(is_aligned(word_size, smallest_chunk_word_size),
1035            "Word_size shall be a multiple of the smallest chunk size.");
1036     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
1037     size_t num_bits = word_size / smallest_chunk_word_size;
1038     _map_size = (num_bits + 7) / 8;
1039     assert(_map_size * 8 >= num_bits, "sanity");
1040     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
1041     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
1042     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
1043     memset(_map[1], 0, _map_size);
1044     memset(_map[0], 0, _map_size);
1045     // Sanity test: the first respectively last possible chunk start address in
1046     // the covered range shall map to the first and last bit in the bitmap.
1047     assert(get_bitpos_for_address(reference_address) == 0,
1048       "First chunk address in range must map to fist bit in bitmap.");
1049     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
1050       "Last chunk address in range must map to last bit in bitmap.");
1051   }
1052 
1053   ~OccupancyMap() {
1054     os::free(_map[0]);
1055     os::free(_map[1]);
1056   }
1057 
1058   // Returns true if at address x a chunk is starting.
1059   bool chunk_starts_at_address(MetaWord* p) const {
1060     const unsigned pos = get_bitpos_for_address(p);
1061     return get_bit_at_position(pos, layer_chunk_start_map);
1062   }
1063 
1064   void set_chunk_starts_at_address(MetaWord* p, bool v) {
1065     const unsigned pos = get_bitpos_for_address(p);
1066     set_bit_at_position(pos, layer_chunk_start_map, v);
1067   }
1068 
1069   // Removes all chunk-start-bits inside a region, typically as a
1070   // result of a chunk merge.
1071   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
1072     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
1073   }
1074 
1075   // Returns true if there are life (in use) chunks in the region limited
1076   // by [p, p+word_size).
1077   bool is_region_in_use(MetaWord* p, size_t word_size) const {
1078     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
1079   }
1080 
1081   // Marks the region starting at p with the size word_size as in use
1082   // or free, depending on v.
1083   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
1084     set_bits_of_region(p, word_size, layer_in_use_map, v);
1085   }
1086 
1087 #ifdef ASSERT
1088   // Verify occupancy map for the address range [from, to).
1089   // We need to tell it the address range, because the memory the
1090   // occupancy map is covering may not be fully comitted yet.
1091   void verify(MetaWord* from, MetaWord* to) {
1092     Metachunk* chunk = NULL;
1093     int nth_bit_for_chunk = 0;
1094     MetaWord* chunk_end = NULL;
1095     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
1096       const unsigned pos = get_bitpos_for_address(p);
1097       // Check the chunk-starts-info:
1098       if (get_bit_at_position(pos, layer_chunk_start_map)) {
1099         // Chunk start marked in bitmap.
1100         chunk = (Metachunk*) p;
1101         if (chunk_end != NULL) {
1102           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
1103                  "the next chunk to start at %p).", p, chunk_end);
1104         }
1105         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
1106         if (chunk->get_chunk_type() != HumongousIndex) {
1107           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
1108         }
1109         chunk_end = p + chunk->word_size();
1110         nth_bit_for_chunk = 0;
1111         assert(chunk_end <= to, "Chunk end overlaps test address range.");
1112       } else {
1113         // No chunk start marked in bitmap.
1114         assert(chunk != NULL, "Chunk should start at start of address range.");
1115         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
1116         nth_bit_for_chunk ++;
1117       }
1118       // Check the in-use-info:
1119       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
1120       if (in_use_bit) {
1121         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
1122                chunk, nth_bit_for_chunk);
1123       } else {
1124         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
1125                chunk, nth_bit_for_chunk);
1126       }
1127     }
1128   }
1129 
1130   // Verify that a given chunk is correctly accounted for in the bitmap.
1131   void verify_for_chunk(Metachunk* chunk) {
1132     assert(chunk_starts_at_address((MetaWord*) chunk),
1133            "No chunk start marked in map for chunk %p.", chunk);
1134     // For chunks larger than the minimal chunk size, no other chunk
1135     // must start in its area.
1136     if (chunk->word_size() > _smallest_chunk_word_size) {
1137       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
1138                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
1139              "No chunk must start within another chunk.");
1140     }
1141     if (!chunk->is_tagged_free()) {
1142       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
1143              "Chunk %p is in use but marked as free in map (%d %d).",
1144              chunk, chunk->get_chunk_type(), chunk->get_origin());
1145     } else {
1146       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
1147              "Chunk %p is free but marked as in-use in map (%d %d).",
1148              chunk, chunk->get_chunk_type(), chunk->get_origin());
1149     }
1150   }
1151 
1152 #endif // ASSERT
1153 
1154 };
1155 
1156 // A VirtualSpaceList node.
1157 class VirtualSpaceNode : public CHeapObj<mtClass> {
1158   friend class VirtualSpaceList;
1159 
1160   // Link to next VirtualSpaceNode
1161   VirtualSpaceNode* _next;
1162 
1163   // Whether this node is contained in class or metaspace.
1164   const bool _is_class;
1165 
1166   // total in the VirtualSpace
1167   MemRegion _reserved;
1168   ReservedSpace _rs;
1169   VirtualSpace _virtual_space;
1170   MetaWord* _top;
1171   // count of chunks contained in this VirtualSpace
1172   uintx _container_count;
1173 
1174   OccupancyMap* _occupancy_map;
1175 
1176   // Convenience functions to access the _virtual_space
1177   char* low()  const { return virtual_space()->low(); }
1178   char* high() const { return virtual_space()->high(); }
1179 
1180   // The first Metachunk will be allocated at the bottom of the
1181   // VirtualSpace
1182   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
1183 
1184   // Committed but unused space in the virtual space
1185   size_t free_words_in_vs() const;
1186 
1187   // True if this node belongs to class metaspace.
1188   bool is_class() const { return _is_class; }
1189 
1190   // Helper function for take_from_committed: allocate padding chunks
1191   // until top is at the given address.
1192   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
1193 
1194  public:
1195 
1196   VirtualSpaceNode(bool is_class, size_t byte_size);
1197   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
1198     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
1199   ~VirtualSpaceNode();
1200 
1201   // Convenience functions for logical bottom and end
1202   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
1203   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
1204 
1205   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
1206   OccupancyMap* occupancy_map() { return _occupancy_map; }
1207 
1208   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
1209 
1210   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
1211   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
1212 
1213   bool is_pre_committed() const { return _virtual_space.special(); }
1214 
1215   // address of next available space in _virtual_space;
1216   // Accessors
1217   VirtualSpaceNode* next() { return _next; }
1218   void set_next(VirtualSpaceNode* v) { _next = v; }
1219 
1220   void set_reserved(MemRegion const v) { _reserved = v; }
1221   void set_top(MetaWord* v) { _top = v; }
1222 
1223   // Accessors
1224   MemRegion* reserved() { return &_reserved; }
1225   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
1226 
1227   // Returns true if "word_size" is available in the VirtualSpace
1228   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
1229 
1230   MetaWord* top() const { return _top; }
1231   void inc_top(size_t word_size) { _top += word_size; }
1232 
1233   uintx container_count() { return _container_count; }
1234   void inc_container_count();
1235   void dec_container_count();
1236 #ifdef ASSERT
1237   uintx container_count_slow();
1238   void verify_container_count();
1239 #endif
1240 
1241   // used and capacity in this single entry in the list
1242   size_t used_words_in_vs() const;
1243   size_t capacity_words_in_vs() const;
1244 
1245   bool initialize();
1246 
1247   // get space from the virtual space
1248   Metachunk* take_from_committed(size_t chunk_word_size);
1249 
1250   // Allocate a chunk from the virtual space and return it.
1251   Metachunk* get_chunk_vs(size_t chunk_word_size);
1252 
1253   // Expands/shrinks the committed space in a virtual space.  Delegates
1254   // to Virtualspace
1255   bool expand_by(size_t min_words, size_t preferred_words);
1256 
1257   // In preparation for deleting this node, remove all the chunks
1258   // in the node from any freelist.
1259   void purge(ChunkManager* chunk_manager);
1260 
1261   // If an allocation doesn't fit in the current node a new node is created.
1262   // Allocate chunks out of the remaining committed space in this node
1263   // to avoid wasting that memory.
1264   // This always adds up because all the chunk sizes are multiples of
1265   // the smallest chunk size.
1266   void retire(ChunkManager* chunk_manager);
1267 
1268 
1269   void print_on(outputStream* st) const;
1270   void print_on(outputStream* st, size_t scale) const;
1271   void print_map(outputStream* st, bool is_class) const;
1272 
1273   // Debug support
1274   DEBUG_ONLY(void mangle();)
1275   // Verify counters, all chunks in this list node and the occupancy map.
1276   DEBUG_ONLY(void verify();)
1277   // Verify that all free chunks in this node are ideally merged
1278   // (there not should be multiple small chunks where a large chunk could exist.)
1279   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
1280 
1281 };
1282 
1283 #define assert_is_aligned(value, alignment)                  \
1284   assert(is_aligned((value), (alignment)),                   \
1285          SIZE_FORMAT_HEX " is not aligned to "               \
1286          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
1287 
1288 #define assert_counter(expected_value, real_value, msg) \
1289   assert( (expected_value) == (real_value),             \
1290          "Counter mismatch (%s): expected " SIZE_FORMAT \
1291          ", but got: " SIZE_FORMAT ".", msg, expected_value, \
1292          real_value);
1293 
1294 // Decide if large pages should be committed when the memory is reserved.
1295 static bool should_commit_large_pages_when_reserving(size_t bytes) {
1296   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
1297     size_t words = bytes / BytesPerWord;
1298     bool is_class = false; // We never reserve large pages for the class space.
1299     if (MetaspaceGC::can_expand(words, is_class) &&
1300         MetaspaceGC::allowed_expansion() >= words) {
1301       return true;
1302     }
1303   }
1304 
1305   return false;
1306 }
1307 
1308   // byte_size is the size of the associated virtualspace.
1309 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
1310   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
1311   assert_is_aligned(bytes, Metaspace::reserve_alignment());
1312   bool large_pages = should_commit_large_pages_when_reserving(bytes);
1313   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
1314 
1315   if (_rs.is_reserved()) {
1316     assert(_rs.base() != NULL, "Catch if we get a NULL address");
1317     assert(_rs.size() != 0, "Catch if we get a 0 size");
1318     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
1319     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
1320 
1321     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
1322   }
1323 }
1324 
1325 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
1326   DEBUG_ONLY(this->verify();)
1327   Metachunk* chunk = first_chunk();
1328   Metachunk* invalid_chunk = (Metachunk*) top();
1329   while (chunk < invalid_chunk ) {
1330     assert(chunk->is_tagged_free(), "Should be tagged free");
1331     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1332     chunk_manager->remove_chunk(chunk);
1333     chunk->remove_sentinel();
1334     assert(chunk->next() == NULL &&
1335            chunk->prev() == NULL,
1336            "Was not removed from its list");
1337     chunk = (Metachunk*) next;
1338   }
1339 }
1340 
1341 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
1342 
1343   if (bottom() == top()) {
1344     return;
1345   }
1346 
1347   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
1348   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
1349   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
1350 
1351   int line_len = 100;
1352   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
1353   line_len = (int)(section_len / spec_chunk_size);
1354 
1355   static const int NUM_LINES = 4;
1356 
1357   char* lines[NUM_LINES];
1358   for (int i = 0; i < NUM_LINES; i ++) {
1359     lines[i] = (char*)os::malloc(line_len, mtInternal);
1360   }
1361   int pos = 0;
1362   const MetaWord* p = bottom();
1363   const Metachunk* chunk = (const Metachunk*)p;
1364   const MetaWord* chunk_end = p + chunk->word_size();
1365   while (p < top()) {
1366     if (pos == line_len) {
1367       pos = 0;
1368       for (int i = 0; i < NUM_LINES; i ++) {
1369         st->fill_to(22);
1370         st->print_raw(lines[i], line_len);
1371         st->cr();
1372       }
1373     }
1374     if (pos == 0) {
1375       st->print(PTR_FORMAT ":", p2i(p));
1376     }
1377     if (p == chunk_end) {
1378       chunk = (Metachunk*)p;
1379       chunk_end = p + chunk->word_size();
1380     }
1381     // line 1: chunk starting points (a dot if that area is a chunk start).
1382     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
1383 
1384     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
1385     // chunk is in use.
1386     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
1387     if (chunk->word_size() == spec_chunk_size) {
1388       lines[1][pos] = chunk_is_free ? 'x' : 'X';
1389     } else if (chunk->word_size() == small_chunk_size) {
1390       lines[1][pos] = chunk_is_free ? 's' : 'S';
1391     } else if (chunk->word_size() == med_chunk_size) {
1392       lines[1][pos] = chunk_is_free ? 'm' : 'M';
1393     } else if (chunk->word_size() > med_chunk_size) {
1394       lines[1][pos] = chunk_is_free ? 'h' : 'H';
1395     } else {
1396       ShouldNotReachHere();
1397     }
1398 
1399     // Line 3: chunk origin
1400     const ChunkOrigin origin = chunk->get_origin();
1401     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
1402 
1403     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
1404     //         but were never used.
1405     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
1406 
1407     p += spec_chunk_size;
1408     pos ++;
1409   }
1410   if (pos > 0) {
1411     for (int i = 0; i < NUM_LINES; i ++) {
1412       st->fill_to(22);
1413       st->print_raw(lines[i], line_len);
1414       st->cr();
1415     }
1416   }
1417   for (int i = 0; i < NUM_LINES; i ++) {
1418     os::free(lines[i]);
1419   }
1420 }
1421 
1422 
1423 #ifdef ASSERT
1424 uintx VirtualSpaceNode::container_count_slow() {
1425   uintx count = 0;
1426   Metachunk* chunk = first_chunk();
1427   Metachunk* invalid_chunk = (Metachunk*) top();
1428   while (chunk < invalid_chunk ) {
1429     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1430     do_verify_chunk(chunk);
1431     // Don't count the chunks on the free lists.  Those are
1432     // still part of the VirtualSpaceNode but not currently
1433     // counted.
1434     if (!chunk->is_tagged_free()) {
1435       count++;
1436     }
1437     chunk = (Metachunk*) next;
1438   }
1439   return count;
1440 }
1441 #endif
1442 
1443 #ifdef ASSERT
1444 // Verify counters, all chunks in this list node and the occupancy map.
1445 void VirtualSpaceNode::verify() {
1446   uintx num_in_use_chunks = 0;
1447   Metachunk* chunk = first_chunk();
1448   Metachunk* invalid_chunk = (Metachunk*) top();
1449 
1450   // Iterate the chunks in this node and verify each chunk.
1451   while (chunk < invalid_chunk ) {
1452     DEBUG_ONLY(do_verify_chunk(chunk);)
1453     if (!chunk->is_tagged_free()) {
1454       num_in_use_chunks ++;
1455     }
1456     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1457     chunk = (Metachunk*) next;
1458   }
1459   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1460          ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
1461   // Also verify the occupancy map.
1462   occupancy_map()->verify(this->bottom(), this->top());
1463 }
1464 #endif // ASSERT
1465 
1466 #ifdef ASSERT
1467 // Verify that all free chunks in this node are ideally merged
1468 // (there not should be multiple small chunks where a large chunk could exist.)
1469 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1470   Metachunk* chunk = first_chunk();
1471   Metachunk* invalid_chunk = (Metachunk*) top();
1472   // Shorthands.
1473   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1474   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1475   int num_free_chunks_since_last_med_boundary = -1;
1476   int num_free_chunks_since_last_small_boundary = -1;
1477   while (chunk < invalid_chunk ) {
1478     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1479     // Reset the counter when encountering a non-free chunk.
1480     if (chunk->get_chunk_type() != HumongousIndex) {
1481       if (chunk->is_tagged_free()) {
1482         // Count successive free, non-humongous chunks.
1483         if (is_aligned(chunk, size_small)) {
1484           assert(num_free_chunks_since_last_small_boundary <= 1,
1485                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1486           num_free_chunks_since_last_small_boundary = 0;
1487         } else if (num_free_chunks_since_last_small_boundary != -1) {
1488           num_free_chunks_since_last_small_boundary ++;
1489         }
1490         if (is_aligned(chunk, size_med)) {
1491           assert(num_free_chunks_since_last_med_boundary <= 1,
1492                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1493           num_free_chunks_since_last_med_boundary = 0;
1494         } else if (num_free_chunks_since_last_med_boundary != -1) {
1495           num_free_chunks_since_last_med_boundary ++;
1496         }
1497       } else {
1498         // Encountering a non-free chunk, reset counters.
1499         num_free_chunks_since_last_med_boundary = -1;
1500         num_free_chunks_since_last_small_boundary = -1;
1501       }
1502     } else {
1503       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1504       num_free_chunks_since_last_med_boundary = -1;
1505       num_free_chunks_since_last_small_boundary = -1;
1506     }
1507 
1508     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1509     chunk = (Metachunk*) next;
1510   }
1511 }
1512 #endif // ASSERT
1513 
1514 // List of VirtualSpaces for metadata allocation.
1515 class VirtualSpaceList : public CHeapObj<mtClass> {
1516   friend class VirtualSpaceNode;
1517 
1518   enum VirtualSpaceSizes {
1519     VirtualSpaceSize = 256 * K
1520   };
1521 
1522   // Head of the list
1523   VirtualSpaceNode* _virtual_space_list;
1524   // virtual space currently being used for allocations
1525   VirtualSpaceNode* _current_virtual_space;
1526 
1527   // Is this VirtualSpaceList used for the compressed class space
1528   bool _is_class;
1529 
1530   // Sum of reserved and committed memory in the virtual spaces
1531   size_t _reserved_words;
1532   size_t _committed_words;
1533 
1534   // Number of virtual spaces
1535   size_t _virtual_space_count;
1536 
1537   ~VirtualSpaceList();
1538 
1539   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1540 
1541   void set_virtual_space_list(VirtualSpaceNode* v) {
1542     _virtual_space_list = v;
1543   }
1544   void set_current_virtual_space(VirtualSpaceNode* v) {
1545     _current_virtual_space = v;
1546   }
1547 
1548   void link_vs(VirtualSpaceNode* new_entry);
1549 
1550   // Get another virtual space and add it to the list.  This
1551   // is typically prompted by a failed attempt to allocate a chunk
1552   // and is typically followed by the allocation of a chunk.
1553   bool create_new_virtual_space(size_t vs_word_size);
1554 
1555   // Chunk up the unused committed space in the current
1556   // virtual space and add the chunks to the free list.
1557   void retire_current_virtual_space();
1558 
1559  public:
1560   VirtualSpaceList(size_t word_size);
1561   VirtualSpaceList(ReservedSpace rs);
1562 
1563   size_t free_bytes();
1564 
1565   Metachunk* get_new_chunk(size_t chunk_word_size,
1566                            size_t suggested_commit_granularity);
1567 
1568   bool expand_node_by(VirtualSpaceNode* node,
1569                       size_t min_words,
1570                       size_t preferred_words);
1571 
1572   bool expand_by(size_t min_words,
1573                  size_t preferred_words);
1574 
1575   VirtualSpaceNode* current_virtual_space() {
1576     return _current_virtual_space;
1577   }
1578 
1579   bool is_class() const { return _is_class; }
1580 
1581   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1582 
1583   size_t reserved_words()  { return _reserved_words; }
1584   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1585   size_t committed_words() { return _committed_words; }
1586   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1587 
1588   void inc_reserved_words(size_t v);
1589   void dec_reserved_words(size_t v);
1590   void inc_committed_words(size_t v);
1591   void dec_committed_words(size_t v);
1592   void inc_virtual_space_count();
1593   void dec_virtual_space_count();
1594 
1595   bool contains(const void* ptr);
1596 
1597   // Unlink empty VirtualSpaceNodes and free it.
1598   void purge(ChunkManager* chunk_manager);
1599 
1600   void print_on(outputStream* st) const;
1601   void print_on(outputStream* st, size_t scale) const;
1602   void print_map(outputStream* st) const;
1603 
1604   class VirtualSpaceListIterator : public StackObj {
1605     VirtualSpaceNode* _virtual_spaces;
1606    public:
1607     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1608       _virtual_spaces(virtual_spaces) {}
1609 
1610     bool repeat() {
1611       return _virtual_spaces != NULL;
1612     }
1613 
1614     VirtualSpaceNode* get_next() {
1615       VirtualSpaceNode* result = _virtual_spaces;
1616       if (_virtual_spaces != NULL) {
1617         _virtual_spaces = _virtual_spaces->next();
1618       }
1619       return result;
1620     }
1621   };
1622 };
1623 
1624 class Metadebug : AllStatic {
1625   // Debugging support for Metaspaces
1626   static int _allocation_fail_alot_count;
1627 
1628  public:
1629 
1630   static void init_allocation_fail_alot_count();
1631 #ifdef ASSERT
1632   static bool test_metadata_failure();
1633 #endif
1634 };
1635 
1636 int Metadebug::_allocation_fail_alot_count = 0;
1637 
1638 
1639 //  SpaceManager - used by Metaspace to handle allocations
1640 class SpaceManager : public CHeapObj<mtClass> {
1641   friend class ClassLoaderMetaspace;
1642   friend class Metadebug;
1643 
1644  private:
1645 
1646   // protects allocations
1647   Mutex* const _lock;
1648 
1649   // Type of metadata allocated.
1650   const Metaspace::MetadataType   _mdtype;
1651 
1652   // Type of metaspace
1653   const Metaspace::MetaspaceType  _space_type;
1654 
1655   // List of chunks in use by this SpaceManager.  Allocations
1656   // are done from the current chunk.  The list is used for deallocating
1657   // chunks when the SpaceManager is freed.
1658   Metachunk* _chunks_in_use[NumberOfInUseLists];
1659   Metachunk* _current_chunk;
1660 
1661   // Maximum number of small chunks to allocate to a SpaceManager
1662   static uint const _small_chunk_limit;
1663 
1664   // Maximum number of specialize chunks to allocate for anonymous and delegating
1665   // metadata space to a SpaceManager
1666   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1667 
1668   // Sum of used space in chunks, including overhead incurred by chunk headers.
1669   size_t _allocated_block_words;
1670 
1671   // Sum of all allocated chunks
1672   size_t _allocated_chunks_words;
1673   size_t _allocated_chunks_count;
1674 
1675   // Free lists of blocks are per SpaceManager since they
1676   // are assumed to be in chunks in use by the SpaceManager
1677   // and all chunks in use by a SpaceManager are freed when
1678   // the class loader using the SpaceManager is collected.
1679   BlockFreelist* _block_freelists;
1680 
1681  private:
1682   // Accessors
1683   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1684   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1685     _chunks_in_use[index] = v;
1686   }
1687 
1688   BlockFreelist* block_freelists() const { return _block_freelists; }
1689 
1690   Metaspace::MetadataType mdtype() { return _mdtype; }
1691 
1692   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1693   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1694 
1695   Metachunk* current_chunk() const { return _current_chunk; }
1696   void set_current_chunk(Metachunk* v) {
1697     _current_chunk = v;
1698   }
1699 
1700   Metachunk* find_current_chunk(size_t word_size);
1701 
1702   // Add chunk to the list of chunks in use
1703   void add_chunk(Metachunk* v, bool make_current);
1704   void retire_current_chunk();
1705 
1706   Mutex* lock() const { return _lock; }
1707 
1708   // Adds to the given statistic object. Must be locked with CLD metaspace lock.
1709   void add_to_statistics_locked(SpaceManagerStatistics* out) const;
1710 
1711   // Verify internal counters against the current state. Must be locked with CLD metaspace lock.
1712   DEBUG_ONLY(void verify_metrics_locked() const;)
1713 
1714  protected:
1715   void initialize();
1716 
1717  public:
1718   SpaceManager(Metaspace::MetadataType mdtype,
1719                Metaspace::MetaspaceType space_type,
1720                Mutex* lock);
1721   ~SpaceManager();
1722 
1723   enum ChunkMultiples {
1724     MediumChunkMultiple = 4
1725   };
1726 
1727   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1728   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1729   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1730 
1731   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1732 
1733   // Accessors
1734   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1735 
1736   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1737   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1738   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1739 
1740   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1741 
1742   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1743 
1744   size_t allocated_blocks_words() const { return _allocated_block_words; }
1745   size_t allocated_blocks_bytes() const { return _allocated_block_words * BytesPerWord; }
1746   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1747   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1748   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1749 
1750   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1751 
1752   // Increment the per Metaspace and global running sums for Metachunks
1753   // by the given size.  This is used when a Metachunk to added to
1754   // the in-use list.
1755   void inc_size_metrics(size_t words);
1756   // Increment the per Metaspace and global running sums Metablocks by the given
1757   // size.  This is used when a Metablock is allocated.
1758   void inc_used_metrics(size_t words);
1759   // Delete the portion of the running sums for this SpaceManager. That is,
1760   // the globals running sums for the Metachunks and Metablocks are
1761   // decremented for all the Metachunks in-use by this SpaceManager.
1762   void dec_total_from_size_metrics();
1763 
1764   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1765   // or return the unadjusted size if the requested size is humongous.
1766   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1767   size_t adjust_initial_chunk_size(size_t requested) const;
1768 
1769   // Get the initial chunks size for this metaspace type.
1770   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1771 
1772   // Todo: remove this if we have counters by chunk type.
1773   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1774 
1775   Metachunk* get_new_chunk(size_t chunk_word_size);
1776 
1777   // Block allocation and deallocation.
1778   // Allocates a block from the current chunk
1779   MetaWord* allocate(size_t word_size);
1780 
1781   // Helper for allocations
1782   MetaWord* allocate_work(size_t word_size);
1783 
1784   // Returns a block to the per manager freelist
1785   void deallocate(MetaWord* p, size_t word_size);
1786 
1787   // Based on the allocation size and a minimum chunk size,
1788   // returned chunk size (for expanding space for chunk allocation).
1789   size_t calc_chunk_size(size_t allocation_word_size);
1790 
1791   // Called when an allocation from the current chunk fails.
1792   // Gets a new chunk (may require getting a new virtual space),
1793   // and allocates from that chunk.
1794   MetaWord* grow_and_allocate(size_t word_size);
1795 
1796   // Notify memory usage to MemoryService.
1797   void track_metaspace_memory_usage();
1798 
1799   // debugging support.
1800 
1801   void print_on(outputStream* st) const;
1802   void locked_print_chunks_in_use_on(outputStream* st) const;
1803 
1804   void verify();
1805   void verify_chunk_size(Metachunk* chunk);
1806 
1807   // This adjusts the size given to be greater than the minimum allocation size in
1808   // words for data in metaspace.  Esentially the minimum size is currently 3 words.
1809   size_t get_allocation_word_size(size_t word_size) {
1810     size_t byte_size = word_size * BytesPerWord;
1811 
1812     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1813     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1814 
1815     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1816     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1817 
1818     return raw_word_size;
1819   }
1820 
1821   // Adds to the given statistic object. Will lock with CLD metaspace lock.
1822   void add_to_statistics(SpaceManagerStatistics* out) const;
1823 
1824   // Verify internal counters against the current state. Will lock with CLD metaspace lock.
1825   DEBUG_ONLY(void verify_metrics() const;)
1826 
1827 };
1828 
1829 uint const SpaceManager::_small_chunk_limit = 4;
1830 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1831 
1832 void VirtualSpaceNode::inc_container_count() {
1833   assert_lock_strong(MetaspaceExpand_lock);
1834   _container_count++;
1835 }
1836 
1837 void VirtualSpaceNode::dec_container_count() {
1838   assert_lock_strong(MetaspaceExpand_lock);
1839   _container_count--;
1840 }
1841 
1842 #ifdef ASSERT
1843 void VirtualSpaceNode::verify_container_count() {
1844   assert(_container_count == container_count_slow(),
1845          "Inconsistency in container_count _container_count " UINTX_FORMAT
1846          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1847 }
1848 #endif
1849 
1850 // BlockFreelist methods
1851 
1852 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1853 
1854 BlockFreelist::~BlockFreelist() {
1855   delete _dictionary;
1856   if (_small_blocks != NULL) {
1857     delete _small_blocks;
1858   }
1859 }
1860 
1861 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1862   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1863 
1864   Metablock* free_chunk = ::new (p) Metablock(word_size);
1865   if (word_size < SmallBlocks::small_block_max_size()) {
1866     small_blocks()->return_block(free_chunk, word_size);
1867   } else {
1868   dictionary()->return_chunk(free_chunk);
1869 }
1870   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1871             SIZE_FORMAT, p2i(free_chunk), word_size);
1872 }
1873 
1874 MetaWord* BlockFreelist::get_block(size_t word_size) {
1875   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1876 
1877   // Try small_blocks first.
1878   if (word_size < SmallBlocks::small_block_max_size()) {
1879     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1880     // this space manager.
1881     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1882     if (new_block != NULL) {
1883       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1884               p2i(new_block), word_size);
1885       return new_block;
1886     }
1887   }
1888 
1889   if (word_size < BlockFreelist::min_dictionary_size()) {
1890     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1891     return NULL;
1892   }
1893 
1894   Metablock* free_block = dictionary()->get_chunk(word_size);
1895   if (free_block == NULL) {
1896     return NULL;
1897   }
1898 
1899   const size_t block_size = free_block->size();
1900   if (block_size > WasteMultiplier * word_size) {
1901     return_block((MetaWord*)free_block, block_size);
1902     return NULL;
1903   }
1904 
1905   MetaWord* new_block = (MetaWord*)free_block;
1906   assert(block_size >= word_size, "Incorrect size of block from freelist");
1907   const size_t unused = block_size - word_size;
1908   if (unused >= SmallBlocks::small_block_min_size()) {
1909     return_block(new_block + word_size, unused);
1910   }
1911 
1912   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1913             p2i(new_block), word_size);
1914   return new_block;
1915 }
1916 
1917 void BlockFreelist::print_on(outputStream* st) const {
1918   dictionary()->print_free_lists(st);
1919   if (_small_blocks != NULL) {
1920     _small_blocks->print_on(st);
1921   }
1922 }
1923 
1924 // VirtualSpaceNode methods
1925 
1926 VirtualSpaceNode::~VirtualSpaceNode() {
1927   _rs.release();
1928   if (_occupancy_map != NULL) {
1929     delete _occupancy_map;
1930   }
1931 #ifdef ASSERT
1932   size_t word_size = sizeof(*this) / BytesPerWord;
1933   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1934 #endif
1935 }
1936 
1937 size_t VirtualSpaceNode::used_words_in_vs() const {
1938   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1939 }
1940 
1941 // Space committed in the VirtualSpace
1942 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1943   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1944 }
1945 
1946 size_t VirtualSpaceNode::free_words_in_vs() const {
1947   return pointer_delta(end(), top(), sizeof(MetaWord));
1948 }
1949 
1950 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1951 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1952 
1953   assert(target_top > top(), "Sanity");
1954 
1955   // Padding chunks are added to the freelist.
1956   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1957 
1958   // shorthands
1959   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1960   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1961   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1962 
1963   while (top() < target_top) {
1964 
1965     // We could make this coding more generic, but right now we only deal with two possible chunk sizes
1966     // for padding chunks, so it is not worth it.
1967     size_t padding_chunk_word_size = small_word_size;
1968     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1969       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1970       padding_chunk_word_size = spec_word_size;
1971     }
1972     MetaWord* here = top();
1973     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1974     inc_top(padding_chunk_word_size);
1975 
1976     // Create new padding chunk.
1977     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1978     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1979 
1980     Metachunk* const padding_chunk =
1981       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1982     assert(padding_chunk == (Metachunk*)here, "Sanity");
1983     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1984     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1985                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1986                                        (is_class() ? "class space " : "metaspace"),
1987                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1988 
1989     // Mark chunk start in occupancy map.
1990     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1991 
1992     // Chunks are born as in-use (see MetaChunk ctor). So, before returning
1993     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1994     // will assert that).
1995     do_update_in_use_info_for_chunk(padding_chunk, true);
1996 
1997     // Return Chunk to freelist.
1998     inc_container_count();
1999     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
2000     // Please note: at this point, ChunkManager::return_single_chunk()
2001     // may already have merged the padding chunk with neighboring chunks, so
2002     // it may have vanished at this point. Do not reference the padding
2003     // chunk beyond this point.
2004   }
2005 
2006   assert(top() == target_top, "Sanity");
2007 
2008 } // allocate_padding_chunks_until_top_is_at()
2009 
2010 // Allocates the chunk from the virtual space only.
2011 // This interface is also used internally for debugging.  Not all
2012 // chunks removed here are necessarily used for allocation.
2013 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
2014   // Non-humongous chunks are to be allocated aligned to their chunk
2015   // size. So, start addresses of medium chunks are aligned to medium
2016   // chunk size, those of small chunks to small chunk size and so
2017   // forth. This facilitates merging of free chunks and reduces
2018   // fragmentation. Chunk sizes are spec < small < medium, with each
2019   // larger chunk size being a multiple of the next smaller chunk
2020   // size.
2021   // Because of this alignment, me may need to create a number of padding
2022   // chunks. These chunks are created and added to the freelist.
2023 
2024   // The chunk manager to which we will give our padding chunks.
2025   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
2026 
2027   // shorthands
2028   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
2029   const size_t small_word_size = chunk_manager->small_chunk_word_size();
2030   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
2031 
2032   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
2033          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
2034 
2035   // Chunk alignment (in bytes) == chunk size unless humongous.
2036   // Humongous chunks are aligned to the smallest chunk size (spec).
2037   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
2038                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
2039 
2040   // Do we have enough space to create the requested chunk plus
2041   // any padding chunks needed?
2042   MetaWord* const next_aligned =
2043     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
2044   if (!is_available((next_aligned - top()) + chunk_word_size)) {
2045     return NULL;
2046   }
2047 
2048   // Before allocating the requested chunk, allocate padding chunks if necessary.
2049   // We only need to do this for small or medium chunks: specialized chunks are the
2050   // smallest size, hence always aligned. Homungous chunks are allocated unaligned
2051   // (implicitly, also aligned to smallest chunk size).
2052   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
2053     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
2054         (is_class() ? "class space " : "metaspace"),
2055         top(), next_aligned);
2056     allocate_padding_chunks_until_top_is_at(next_aligned);
2057     // Now, top should be aligned correctly.
2058     assert_is_aligned(top(), required_chunk_alignment);
2059   }
2060 
2061   // Now, top should be aligned correctly.
2062   assert_is_aligned(top(), required_chunk_alignment);
2063 
2064   // Bottom of the new chunk
2065   MetaWord* chunk_limit = top();
2066   assert(chunk_limit != NULL, "Not safe to call this method");
2067 
2068   // The virtual spaces are always expanded by the
2069   // commit granularity to enforce the following condition.
2070   // Without this the is_available check will not work correctly.
2071   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
2072       "The committed memory doesn't match the expanded memory.");
2073 
2074   if (!is_available(chunk_word_size)) {
2075     LogTarget(Debug, gc, metaspace, freelist) lt;
2076     if (lt.is_enabled()) {
2077       LogStream ls(lt);
2078       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
2079       // Dump some information about the virtual space that is nearly full
2080       print_on(&ls);
2081     }
2082     return NULL;
2083   }
2084 
2085   // Take the space  (bump top on the current virtual space).
2086   inc_top(chunk_word_size);
2087 
2088   // Initialize the chunk
2089   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
2090   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
2091   assert(result == (Metachunk*)chunk_limit, "Sanity");
2092   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
2093   do_update_in_use_info_for_chunk(result, true);
2094 
2095   inc_container_count();
2096 
2097   if (VerifyMetaspace) {
2098     DEBUG_ONLY(chunk_manager->locked_verify());
2099     DEBUG_ONLY(this->verify());
2100   }
2101 
2102   DEBUG_ONLY(do_verify_chunk(result));
2103 
2104   result->inc_use_count();
2105 
2106   return result;
2107 }
2108 
2109 
2110 // Expand the virtual space (commit more of the reserved space)
2111 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
2112   size_t min_bytes = min_words * BytesPerWord;
2113   size_t preferred_bytes = preferred_words * BytesPerWord;
2114 
2115   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
2116 
2117   if (uncommitted < min_bytes) {
2118     return false;
2119   }
2120 
2121   size_t commit = MIN2(preferred_bytes, uncommitted);
2122   bool result = virtual_space()->expand_by(commit, false);
2123 
2124   if (result) {
2125     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
2126               (is_class() ? "class" : "non-class"), commit);
2127     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
2128   } else {
2129     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
2130               (is_class() ? "class" : "non-class"), commit);
2131   }
2132 
2133   assert(result, "Failed to commit memory");
2134 
2135   return result;
2136 }
2137 
2138 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
2139   assert_lock_strong(MetaspaceExpand_lock);
2140   Metachunk* result = take_from_committed(chunk_word_size);
2141   return result;
2142 }
2143 
2144 bool VirtualSpaceNode::initialize() {
2145 
2146   if (!_rs.is_reserved()) {
2147     return false;
2148   }
2149 
2150   // These are necessary restriction to make sure that the virtual space always
2151   // grows in steps of Metaspace::commit_alignment(). If both base and size are
2152   // aligned only the middle alignment of the VirtualSpace is used.
2153   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
2154   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
2155 
2156   // ReservedSpaces marked as special will have the entire memory
2157   // pre-committed. Setting a committed size will make sure that
2158   // committed_size and actual_committed_size agrees.
2159   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
2160 
2161   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
2162                                             Metaspace::commit_alignment());
2163   if (result) {
2164     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
2165         "Checking that the pre-committed memory was registered by the VirtualSpace");
2166 
2167     set_top((MetaWord*)virtual_space()->low());
2168     set_reserved(MemRegion((HeapWord*)_rs.base(),
2169                  (HeapWord*)(_rs.base() + _rs.size())));
2170 
2171     assert(reserved()->start() == (HeapWord*) _rs.base(),
2172            "Reserved start was not set properly " PTR_FORMAT
2173            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
2174     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
2175            "Reserved size was not set properly " SIZE_FORMAT
2176            " != " SIZE_FORMAT, reserved()->word_size(),
2177            _rs.size() / BytesPerWord);
2178   }
2179 
2180   // Initialize Occupancy Map.
2181   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
2182   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
2183 
2184   return result;
2185 }
2186 
2187 void VirtualSpaceNode::print_on(outputStream* st) const {
2188   print_on(st, K);
2189 }
2190 
2191 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
2192   size_t used_words = used_words_in_vs();
2193   size_t commit_words = committed_words();
2194   size_t res_words = reserved_words();
2195   VirtualSpace* vs = virtual_space();
2196 
2197   st->print("node @" PTR_FORMAT ": ", p2i(this));
2198   st->print("reserved=");
2199   print_scaled_words(st, res_words, scale);
2200   st->print(", committed=");
2201   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
2202   st->print(", used=");
2203   print_scaled_words_and_percentage(st, used_words, res_words, scale);
2204   st->cr();
2205   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
2206            PTR_FORMAT ", " PTR_FORMAT ")",
2207            p2i(bottom()), p2i(top()), p2i(end()),
2208            p2i(vs->high_boundary()));
2209 }
2210 
2211 #ifdef ASSERT
2212 void VirtualSpaceNode::mangle() {
2213   size_t word_size = capacity_words_in_vs();
2214   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
2215 }
2216 #endif // ASSERT
2217 
2218 // VirtualSpaceList methods
2219 // Space allocated from the VirtualSpace
2220 
2221 VirtualSpaceList::~VirtualSpaceList() {
2222   VirtualSpaceListIterator iter(virtual_space_list());
2223   while (iter.repeat()) {
2224     VirtualSpaceNode* vsl = iter.get_next();
2225     delete vsl;
2226   }
2227 }
2228 
2229 void VirtualSpaceList::inc_reserved_words(size_t v) {
2230   assert_lock_strong(MetaspaceExpand_lock);
2231   _reserved_words = _reserved_words + v;
2232 }
2233 void VirtualSpaceList::dec_reserved_words(size_t v) {
2234   assert_lock_strong(MetaspaceExpand_lock);
2235   _reserved_words = _reserved_words - v;
2236 }
2237 
2238 #define assert_committed_below_limit()                        \
2239   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
2240          "Too much committed memory. Committed: " SIZE_FORMAT \
2241          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
2242           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
2243 
2244 void VirtualSpaceList::inc_committed_words(size_t v) {
2245   assert_lock_strong(MetaspaceExpand_lock);
2246   _committed_words = _committed_words + v;
2247 
2248   assert_committed_below_limit();
2249 }
2250 void VirtualSpaceList::dec_committed_words(size_t v) {
2251   assert_lock_strong(MetaspaceExpand_lock);
2252   _committed_words = _committed_words - v;
2253 
2254   assert_committed_below_limit();
2255 }
2256 
2257 void VirtualSpaceList::inc_virtual_space_count() {
2258   assert_lock_strong(MetaspaceExpand_lock);
2259   _virtual_space_count++;
2260 }
2261 void VirtualSpaceList::dec_virtual_space_count() {
2262   assert_lock_strong(MetaspaceExpand_lock);
2263   _virtual_space_count--;
2264 }
2265 
2266 void ChunkManager::remove_chunk(Metachunk* chunk) {
2267   size_t word_size = chunk->word_size();
2268   ChunkIndex index = list_index(word_size);
2269   if (index != HumongousIndex) {
2270     free_chunks(index)->remove_chunk(chunk);
2271   } else {
2272     humongous_dictionary()->remove_chunk(chunk);
2273   }
2274 
2275   // Chunk has been removed from the chunks free list, update counters.
2276   account_for_removed_chunk(chunk);
2277 }
2278 
2279 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
2280   assert_lock_strong(MetaspaceExpand_lock);
2281   assert(chunk != NULL, "invalid chunk pointer");
2282   // Check for valid merge combinations.
2283   assert((chunk->get_chunk_type() == SpecializedIndex &&
2284           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
2285          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
2286         "Invalid chunk merge combination.");
2287 
2288   const size_t target_chunk_word_size =
2289     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
2290 
2291   // [ prospective merge region )
2292   MetaWord* const p_merge_region_start =
2293     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
2294   MetaWord* const p_merge_region_end =
2295     p_merge_region_start + target_chunk_word_size;
2296 
2297   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
2298   VirtualSpaceNode* const vsn = chunk->container();
2299   OccupancyMap* const ocmap = vsn->occupancy_map();
2300 
2301   // The prospective chunk merge range must be completely contained by the
2302   // committed range of the virtual space node.
2303   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
2304     return false;
2305   }
2306 
2307   // Only attempt to merge this range if at its start a chunk starts and at its end
2308   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
2309   // of that range, we cannot merge.
2310   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
2311     return false;
2312   }
2313   if (p_merge_region_end < vsn->top() &&
2314       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
2315     return false;
2316   }
2317 
2318   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
2319   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
2320     return false;
2321   }
2322 
2323   // Success! Remove all chunks in this region...
2324   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
2325     (is_class() ? "class space" : "metaspace"),
2326     p_merge_region_start, p_merge_region_end);
2327 
2328   const int num_chunks_removed =
2329     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
2330 
2331   // ... and create a single new bigger chunk.
2332   Metachunk* const p_new_chunk =
2333       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
2334   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
2335   p_new_chunk->set_origin(origin_merge);
2336 
2337   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
2338     (is_class() ? "class space" : "metaspace"),
2339     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
2340 
2341   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
2342   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
2343   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
2344 
2345   // Mark chunk as free. Note: it is not necessary to update the occupancy
2346   // map in-use map, because the old chunks were also free, so nothing
2347   // should have changed.
2348   p_new_chunk->set_is_tagged_free(true);
2349 
2350   // Add new chunk to its freelist.
2351   ChunkList* const list = free_chunks(target_chunk_type);
2352   list->return_chunk_at_head(p_new_chunk);
2353 
2354   // And adjust ChunkManager:: _free_chunks_count (_free_chunks_total
2355   // should not have changed, because the size of the space should be the same)
2356   _free_chunks_count -= num_chunks_removed;
2357   _free_chunks_count ++;
2358 
2359   // VirtualSpaceNode::container_count does not have to be modified:
2360   // it means "number of active (non-free) chunks", so merging free chunks
2361   // should not affect that count.
2362 
2363   // At the end of a chunk merge, run verification tests.
2364   if (VerifyMetaspace) {
2365     DEBUG_ONLY(this->locked_verify());
2366     DEBUG_ONLY(vsn->verify());
2367   }
2368 
2369   return true;
2370 }
2371 
2372 // Remove all chunks in the given area - the chunks are supposed to be free -
2373 // from their corresponding freelists. Mark them as invalid.
2374 // - This does not correct the occupancy map.
2375 // - This does not adjust the counters in ChunkManager.
2376 // - Does not adjust container count counter in containing VirtualSpaceNode
2377 // Returns number of chunks removed.
2378 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
2379   assert(p != NULL && word_size > 0, "Invalid range.");
2380   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
2381   assert_is_aligned(word_size, smallest_chunk_size);
2382 
2383   Metachunk* const start = (Metachunk*) p;
2384   const Metachunk* const end = (Metachunk*)(p + word_size);
2385   Metachunk* cur = start;
2386   int num_removed = 0;
2387   while (cur < end) {
2388     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
2389     DEBUG_ONLY(do_verify_chunk(cur));
2390     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
2391     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
2392     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
2393       (is_class() ? "class space" : "metaspace"),
2394       cur, cur->word_size() * sizeof(MetaWord));
2395     cur->remove_sentinel();
2396     // Note: cannot call ChunkManager::remove_chunk, because that
2397     // modifies the counters in ChunkManager, which we do not want. So
2398     // we call remove_chunk on the freelist directly (see also the
2399     // splitting function which does the same).
2400     ChunkList* const list = free_chunks(list_index(cur->word_size()));
2401     list->remove_chunk(cur);
2402     num_removed ++;
2403     cur = next;
2404   }
2405   return num_removed;
2406 }
2407 
2408 // Walk the list of VirtualSpaceNodes and delete
2409 // nodes with a 0 container_count.  Remove Metachunks in
2410 // the node from their respective freelists.
2411 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2412   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2413   assert_lock_strong(MetaspaceExpand_lock);
2414   // Don't use a VirtualSpaceListIterator because this
2415   // list is being changed and a straightforward use of an iterator is not safe.
2416   VirtualSpaceNode* purged_vsl = NULL;
2417   VirtualSpaceNode* prev_vsl = virtual_space_list();
2418   VirtualSpaceNode* next_vsl = prev_vsl;
2419   while (next_vsl != NULL) {
2420     VirtualSpaceNode* vsl = next_vsl;
2421     DEBUG_ONLY(vsl->verify_container_count();)
2422     next_vsl = vsl->next();
2423     // Don't free the current virtual space since it will likely
2424     // be needed soon.
2425     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2426       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2427                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2428       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
2429       // Unlink it from the list
2430       if (prev_vsl == vsl) {
2431         // This is the case of the current node being the first node.
2432         assert(vsl == virtual_space_list(), "Expected to be the first node");
2433         set_virtual_space_list(vsl->next());
2434       } else {
2435         prev_vsl->set_next(vsl->next());
2436       }
2437 
2438       vsl->purge(chunk_manager);
2439       dec_reserved_words(vsl->reserved_words());
2440       dec_committed_words(vsl->committed_words());
2441       dec_virtual_space_count();
2442       purged_vsl = vsl;
2443       delete vsl;
2444     } else {
2445       prev_vsl = vsl;
2446     }
2447   }
2448 #ifdef ASSERT
2449   if (purged_vsl != NULL) {
2450     // List should be stable enough to use an iterator here.
2451     VirtualSpaceListIterator iter(virtual_space_list());
2452     while (iter.repeat()) {
2453       VirtualSpaceNode* vsl = iter.get_next();
2454       assert(vsl != purged_vsl, "Purge of vsl failed");
2455     }
2456   }
2457 #endif
2458 }
2459 
2460 
2461 // This function looks at the mmap regions in the metaspace without locking.
2462 // The chunks are added with store ordering and not deleted except for at
2463 // unloading time during a safepoint.
2464 bool VirtualSpaceList::contains(const void* ptr) {
2465   // List should be stable enough to use an iterator here because removing virtual
2466   // space nodes is only allowed at a safepoint.
2467   VirtualSpaceListIterator iter(virtual_space_list());
2468   while (iter.repeat()) {
2469     VirtualSpaceNode* vsn = iter.get_next();
2470     if (vsn->contains(ptr)) {
2471       return true;
2472     }
2473   }
2474   return false;
2475 }
2476 
2477 void VirtualSpaceList::retire_current_virtual_space() {
2478   assert_lock_strong(MetaspaceExpand_lock);
2479 
2480   VirtualSpaceNode* vsn = current_virtual_space();
2481 
2482   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2483                                   Metaspace::chunk_manager_metadata();
2484 
2485   vsn->retire(cm);
2486 }
2487 
2488 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2489   DEBUG_ONLY(verify_container_count();)
2490   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2491   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2492     ChunkIndex index = (ChunkIndex)i;
2493     size_t chunk_size = chunk_manager->size_by_index(index);
2494 
2495     while (free_words_in_vs() >= chunk_size) {
2496       Metachunk* chunk = get_chunk_vs(chunk_size);
2497       // Chunk will be allocated aligned, so allocation may require
2498       // additional padding chunks. That may cause above allocation to
2499       // fail. Just ignore the failed allocation and continue with the
2500       // next smaller chunk size. As the VirtualSpaceNode comitted
2501       // size should be a multiple of the smallest chunk size, we
2502       // should always be able to fill the VirtualSpace completely.
2503       if (chunk == NULL) {
2504         break;
2505       }
2506       chunk_manager->return_single_chunk(index, chunk);
2507     }
2508     DEBUG_ONLY(verify_container_count();)
2509   }
2510   assert(free_words_in_vs() == 0, "should be empty now");
2511 }
2512 
2513 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2514                                    _is_class(false),
2515                                    _virtual_space_list(NULL),
2516                                    _current_virtual_space(NULL),
2517                                    _reserved_words(0),
2518                                    _committed_words(0),
2519                                    _virtual_space_count(0) {
2520   MutexLockerEx cl(MetaspaceExpand_lock,
2521                    Mutex::_no_safepoint_check_flag);
2522   create_new_virtual_space(word_size);
2523 }
2524 
2525 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2526                                    _is_class(true),
2527                                    _virtual_space_list(NULL),
2528                                    _current_virtual_space(NULL),
2529                                    _reserved_words(0),
2530                                    _committed_words(0),
2531                                    _virtual_space_count(0) {
2532   MutexLockerEx cl(MetaspaceExpand_lock,
2533                    Mutex::_no_safepoint_check_flag);
2534   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2535   bool succeeded = class_entry->initialize();
2536   if (succeeded) {
2537     link_vs(class_entry);
2538   }
2539 }
2540 
2541 size_t VirtualSpaceList::free_bytes() {
2542   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2543 }
2544 
2545 // Allocate another meta virtual space and add it to the list.
2546 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2547   assert_lock_strong(MetaspaceExpand_lock);
2548 
2549   if (is_class()) {
2550     assert(false, "We currently don't support more than one VirtualSpace for"
2551                   " the compressed class space. The initialization of the"
2552                   " CCS uses another code path and should not hit this path.");
2553     return false;
2554   }
2555 
2556   if (vs_word_size == 0) {
2557     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2558     return false;
2559   }
2560 
2561   // Reserve the space
2562   size_t vs_byte_size = vs_word_size * BytesPerWord;
2563   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2564 
2565   // Allocate the meta virtual space and initialize it.
2566   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2567   if (!new_entry->initialize()) {
2568     delete new_entry;
2569     return false;
2570   } else {
2571     assert(new_entry->reserved_words() == vs_word_size,
2572         "Reserved memory size differs from requested memory size");
2573     // ensure lock-free iteration sees fully initialized node
2574     OrderAccess::storestore();
2575     link_vs(new_entry);
2576     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
2577     return true;
2578   }
2579 }
2580 
2581 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2582   if (virtual_space_list() == NULL) {
2583       set_virtual_space_list(new_entry);
2584   } else {
2585     current_virtual_space()->set_next(new_entry);
2586   }
2587   set_current_virtual_space(new_entry);
2588   inc_reserved_words(new_entry->reserved_words());
2589   inc_committed_words(new_entry->committed_words());
2590   inc_virtual_space_count();
2591 #ifdef ASSERT
2592   new_entry->mangle();
2593 #endif
2594   LogTarget(Trace, gc, metaspace) lt;
2595   if (lt.is_enabled()) {
2596     LogStream ls(lt);
2597     VirtualSpaceNode* vsl = current_virtual_space();
2598     ResourceMark rm;
2599     vsl->print_on(&ls);
2600   }
2601 }
2602 
2603 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2604                                       size_t min_words,
2605                                       size_t preferred_words) {
2606   size_t before = node->committed_words();
2607 
2608   bool result = node->expand_by(min_words, preferred_words);
2609 
2610   size_t after = node->committed_words();
2611 
2612   // after and before can be the same if the memory was pre-committed.
2613   assert(after >= before, "Inconsistency");
2614   inc_committed_words(after - before);
2615 
2616   return result;
2617 }
2618 
2619 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2620   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2621   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2622   assert(min_words <= preferred_words, "Invalid arguments");
2623 
2624   const char* const class_or_not = (is_class() ? "class" : "non-class");
2625 
2626   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2627     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2628               class_or_not);
2629     return  false;
2630   }
2631 
2632   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2633   if (allowed_expansion_words < min_words) {
2634     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2635               class_or_not);
2636     return false;
2637   }
2638 
2639   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2640 
2641   // Commit more memory from the the current virtual space.
2642   bool vs_expanded = expand_node_by(current_virtual_space(),
2643                                     min_words,
2644                                     max_expansion_words);
2645   if (vs_expanded) {
2646      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2647                class_or_not);
2648      return true;
2649   }
2650   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2651             class_or_not);
2652   retire_current_virtual_space();
2653 
2654   // Get another virtual space.
2655   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2656   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2657 
2658   if (create_new_virtual_space(grow_vs_words)) {
2659     if (current_virtual_space()->is_pre_committed()) {
2660       // The memory was pre-committed, so we are done here.
2661       assert(min_words <= current_virtual_space()->committed_words(),
2662           "The new VirtualSpace was pre-committed, so it"
2663           "should be large enough to fit the alloc request.");
2664       return true;
2665     }
2666 
2667     return expand_node_by(current_virtual_space(),
2668                           min_words,
2669                           max_expansion_words);
2670   }
2671 
2672   return false;
2673 }
2674 
2675 // Given a chunk, calculate the largest possible padding space which
2676 // could be required when allocating it.
2677 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2678   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2679   if (chunk_type != HumongousIndex) {
2680     // Normal, non-humongous chunks are allocated at chunk size
2681     // boundaries, so the largest padding space required would be that
2682     // minus the smallest chunk size.
2683     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2684     return chunk_word_size - smallest_chunk_size;
2685   } else {
2686     // Humongous chunks are allocated at smallest-chunksize
2687     // boundaries, so there is no padding required.
2688     return 0;
2689   }
2690 }
2691 
2692 
2693 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2694 
2695   // Allocate a chunk out of the current virtual space.
2696   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2697 
2698   if (next != NULL) {
2699     return next;
2700   }
2701 
2702   // The expand amount is currently only determined by the requested sizes
2703   // and not how much committed memory is left in the current virtual space.
2704 
2705   // We must have enough space for the requested size and any
2706   // additional reqired padding chunks.
2707   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2708 
2709   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2710   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2711   if (min_word_size >= preferred_word_size) {
2712     // Can happen when humongous chunks are allocated.
2713     preferred_word_size = min_word_size;
2714   }
2715 
2716   bool expanded = expand_by(min_word_size, preferred_word_size);
2717   if (expanded) {
2718     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2719     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2720   }
2721 
2722    return next;
2723 }
2724 
2725 void VirtualSpaceList::print_on(outputStream* st) const {
2726   print_on(st, K);
2727 }
2728 
2729 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
2730   st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
2731       _virtual_space_count, p2i(_current_virtual_space));
2732   VirtualSpaceListIterator iter(virtual_space_list());
2733   while (iter.repeat()) {
2734     st->cr();
2735     VirtualSpaceNode* node = iter.get_next();
2736     node->print_on(st, scale);
2737   }
2738 }
2739 
2740 void VirtualSpaceList::print_map(outputStream* st) const {
2741   VirtualSpaceNode* list = virtual_space_list();
2742   VirtualSpaceListIterator iter(list);
2743   unsigned i = 0;
2744   while (iter.repeat()) {
2745     st->print_cr("Node %u:", i);
2746     VirtualSpaceNode* node = iter.get_next();
2747     node->print_map(st, this->is_class());
2748     i ++;
2749   }
2750 }
2751 
2752 // MetaspaceGC methods
2753 
2754 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2755 // Within the VM operation after the GC the attempt to allocate the metadata
2756 // should succeed.  If the GC did not free enough space for the metaspace
2757 // allocation, the HWM is increased so that another virtualspace will be
2758 // allocated for the metadata.  With perm gen the increase in the perm
2759 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2760 // metaspace policy uses those as the small and large steps for the HWM.
2761 //
2762 // After the GC the compute_new_size() for MetaspaceGC is called to
2763 // resize the capacity of the metaspaces.  The current implementation
2764 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2765 // to resize the Java heap by some GC's.  New flags can be implemented
2766 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2767 // free space is desirable in the metaspace capacity to decide how much
2768 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2769 // free space is desirable in the metaspace capacity before decreasing
2770 // the HWM.
2771 
2772 // Calculate the amount to increase the high water mark (HWM).
2773 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2774 // another expansion is not requested too soon.  If that is not
2775 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2776 // If that is still not enough, expand by the size of the allocation
2777 // plus some.
2778 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2779   size_t min_delta = MinMetaspaceExpansion;
2780   size_t max_delta = MaxMetaspaceExpansion;
2781   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2782 
2783   if (delta <= min_delta) {
2784     delta = min_delta;
2785   } else if (delta <= max_delta) {
2786     // Don't want to hit the high water mark on the next
2787     // allocation so make the delta greater than just enough
2788     // for this allocation.
2789     delta = max_delta;
2790   } else {
2791     // This allocation is large but the next ones are probably not
2792     // so increase by the minimum.
2793     delta = delta + min_delta;
2794   }
2795 
2796   assert_is_aligned(delta, Metaspace::commit_alignment());
2797 
2798   return delta;
2799 }
2800 
2801 size_t MetaspaceGC::capacity_until_GC() {
2802   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2803   assert(value >= MetaspaceSize, "Not initialized properly?");
2804   return value;
2805 }
2806 
2807 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2808   assert_is_aligned(v, Metaspace::commit_alignment());
2809 
2810   intptr_t capacity_until_GC = _capacity_until_GC;
2811   intptr_t new_value = capacity_until_GC + v;
2812 
2813   if (new_value < capacity_until_GC) {
2814     // The addition wrapped around, set new_value to aligned max value.
2815     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2816   }
2817 
2818   intptr_t expected = _capacity_until_GC;
2819   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2820 
2821   if (expected != actual) {
2822     return false;
2823   }
2824 
2825   if (new_cap_until_GC != NULL) {
2826     *new_cap_until_GC = new_value;
2827   }
2828   if (old_cap_until_GC != NULL) {
2829     *old_cap_until_GC = capacity_until_GC;
2830   }
2831   return true;
2832 }
2833 
2834 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2835   assert_is_aligned(v, Metaspace::commit_alignment());
2836 
2837   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2838 }
2839 
2840 void MetaspaceGC::initialize() {
2841   // Set the high-water mark to MaxMetapaceSize during VM initializaton since
2842   // we can't do a GC during initialization.
2843   _capacity_until_GC = MaxMetaspaceSize;
2844 }
2845 
2846 void MetaspaceGC::post_initialize() {
2847   // Reset the high-water mark once the VM initialization is done.
2848   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2849 }
2850 
2851 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2852   // Check if the compressed class space is full.
2853   if (is_class && Metaspace::using_class_space()) {
2854     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2855     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2856       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2857                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2858       return false;
2859     }
2860   }
2861 
2862   // Check if the user has imposed a limit on the metaspace memory.
2863   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2864   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2865     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2866               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2867     return false;
2868   }
2869 
2870   return true;
2871 }
2872 
2873 size_t MetaspaceGC::allowed_expansion() {
2874   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2875   size_t capacity_until_gc = capacity_until_GC();
2876 
2877   assert(capacity_until_gc >= committed_bytes,
2878          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2879          capacity_until_gc, committed_bytes);
2880 
2881   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2882   size_t left_until_GC = capacity_until_gc - committed_bytes;
2883   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2884   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2885             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2886             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2887 
2888   return left_to_commit / BytesPerWord;
2889 }
2890 
2891 void MetaspaceGC::compute_new_size() {
2892   assert(_shrink_factor <= 100, "invalid shrink factor");
2893   uint current_shrink_factor = _shrink_factor;
2894   _shrink_factor = 0;
2895 
2896   // Using committed_bytes() for used_after_gc is an overestimation, since the
2897   // chunk free lists are included in committed_bytes() and the memory in an
2898   // un-fragmented chunk free list is available for future allocations.
2899   // However, if the chunk free lists becomes fragmented, then the memory may
2900   // not be available for future allocations and the memory is therefore "in use".
2901   // Including the chunk free lists in the definition of "in use" is therefore
2902   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2903   // shrink below committed_bytes() and this has caused serious bugs in the past.
2904   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2905   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2906 
2907   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2908   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2909 
2910   const double min_tmp = used_after_gc / maximum_used_percentage;
2911   size_t minimum_desired_capacity =
2912     (size_t)MIN2(min_tmp, double(max_uintx));
2913   // Don't shrink less than the initial generation size
2914   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2915                                   MetaspaceSize);
2916 
2917   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2918   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2919                            minimum_free_percentage, maximum_used_percentage);
2920   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2921 
2922 
2923   size_t shrink_bytes = 0;
2924   if (capacity_until_GC < minimum_desired_capacity) {
2925     // If we have less capacity below the metaspace HWM, then
2926     // increment the HWM.
2927     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2928     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2929     // Don't expand unless it's significant
2930     if (expand_bytes >= MinMetaspaceExpansion) {
2931       size_t new_capacity_until_GC = 0;
2932       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2933       assert(succeeded, "Should always succesfully increment HWM when at safepoint");
2934 
2935       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2936                                                new_capacity_until_GC,
2937                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2938       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2939                                minimum_desired_capacity / (double) K,
2940                                expand_bytes / (double) K,
2941                                MinMetaspaceExpansion / (double) K,
2942                                new_capacity_until_GC / (double) K);
2943     }
2944     return;
2945   }
2946 
2947   // No expansion, now see if we want to shrink
2948   // We would never want to shrink more than this
2949   assert(capacity_until_GC >= minimum_desired_capacity,
2950          SIZE_FORMAT " >= " SIZE_FORMAT,
2951          capacity_until_GC, minimum_desired_capacity);
2952   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2953 
2954   // Should shrinking be considered?
2955   if (MaxMetaspaceFreeRatio < 100) {
2956     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2957     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2958     const double max_tmp = used_after_gc / minimum_used_percentage;
2959     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2960     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2961                                     MetaspaceSize);
2962     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2963                              maximum_free_percentage, minimum_used_percentage);
2964     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2965                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2966 
2967     assert(minimum_desired_capacity <= maximum_desired_capacity,
2968            "sanity check");
2969 
2970     if (capacity_until_GC > maximum_desired_capacity) {
2971       // Capacity too large, compute shrinking size
2972       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2973       // We don't want shrink all the way back to initSize if people call
2974       // System.gc(), because some programs do that between "phases" and then
2975       // we'd just have to grow the heap up again for the next phase.  So we
2976       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2977       // on the third call, and 100% by the fourth call.  But if we recompute
2978       // size without shrinking, it goes back to 0%.
2979       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2980 
2981       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2982 
2983       assert(shrink_bytes <= max_shrink_bytes,
2984              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2985              shrink_bytes, max_shrink_bytes);
2986       if (current_shrink_factor == 0) {
2987         _shrink_factor = 10;
2988       } else {
2989         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2990       }
2991       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2992                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2993       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2994                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2995     }
2996   }
2997 
2998   // Don't shrink unless it's significant
2999   if (shrink_bytes >= MinMetaspaceExpansion &&
3000       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
3001     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
3002     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
3003                                              new_capacity_until_GC,
3004                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
3005   }
3006 }
3007 
3008 // Metadebug methods
3009 
3010 void Metadebug::init_allocation_fail_alot_count() {
3011   if (MetadataAllocationFailALot) {
3012     _allocation_fail_alot_count =
3013       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
3014   }
3015 }
3016 
3017 #ifdef ASSERT
3018 bool Metadebug::test_metadata_failure() {
3019   if (MetadataAllocationFailALot &&
3020       Threads::is_vm_complete()) {
3021     if (_allocation_fail_alot_count > 0) {
3022       _allocation_fail_alot_count--;
3023     } else {
3024       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
3025       init_allocation_fail_alot_count();
3026       return true;
3027     }
3028   }
3029   return false;
3030 }
3031 #endif
3032 
3033 // ChunkManager methods
3034 size_t ChunkManager::free_chunks_total_words() {
3035   return _free_chunks_total;
3036 }
3037 
3038 size_t ChunkManager::free_chunks_total_bytes() {
3039   return free_chunks_total_words() * BytesPerWord;
3040 }
3041 
3042 // Update internal accounting after a chunk was added
3043 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
3044   assert_lock_strong(MetaspaceExpand_lock);
3045   _free_chunks_count ++;
3046   _free_chunks_total += c->word_size();
3047 }
3048 
3049 // Update internal accounting after a chunk was removed
3050 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
3051   assert_lock_strong(MetaspaceExpand_lock);
3052   assert(_free_chunks_count >= 1,
3053     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
3054   assert(_free_chunks_total >= c->word_size(),
3055     "ChunkManager::_free_chunks_total: about to go negative"
3056      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
3057   _free_chunks_count --;
3058   _free_chunks_total -= c->word_size();
3059 }
3060 
3061 size_t ChunkManager::free_chunks_count() {
3062 #ifdef ASSERT
3063   if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
3064     MutexLockerEx cl(MetaspaceExpand_lock,
3065                      Mutex::_no_safepoint_check_flag);
3066     // This lock is only needed in debug because the verification
3067     // of the _free_chunks_totals walks the list of free chunks
3068     slow_locked_verify_free_chunks_count();
3069   }
3070 #endif
3071   return _free_chunks_count;
3072 }
3073 
3074 ChunkIndex ChunkManager::list_index(size_t size) {
3075   return get_chunk_type_by_size(size, is_class());
3076 }
3077 
3078 size_t ChunkManager::size_by_index(ChunkIndex index) const {
3079   index_bounds_check(index);
3080   assert(index != HumongousIndex, "Do not call for humongous chunks.");
3081   return get_size_for_nonhumongous_chunktype(index, is_class());
3082 }
3083 
3084 void ChunkManager::locked_verify_free_chunks_total() {
3085   assert_lock_strong(MetaspaceExpand_lock);
3086   assert(sum_free_chunks() == _free_chunks_total,
3087          "_free_chunks_total " SIZE_FORMAT " is not the"
3088          " same as sum " SIZE_FORMAT, _free_chunks_total,
3089          sum_free_chunks());
3090 }
3091 
3092 void ChunkManager::verify_free_chunks_total() {
3093   MutexLockerEx cl(MetaspaceExpand_lock,
3094                      Mutex::_no_safepoint_check_flag);
3095   locked_verify_free_chunks_total();
3096 }
3097 
3098 void ChunkManager::locked_verify_free_chunks_count() {
3099   assert_lock_strong(MetaspaceExpand_lock);
3100   assert(sum_free_chunks_count() == _free_chunks_count,
3101          "_free_chunks_count " SIZE_FORMAT " is not the"
3102          " same as sum " SIZE_FORMAT, _free_chunks_count,
3103          sum_free_chunks_count());
3104 }
3105 
3106 void ChunkManager::verify_free_chunks_count() {
3107 #ifdef ASSERT
3108   MutexLockerEx cl(MetaspaceExpand_lock,
3109                      Mutex::_no_safepoint_check_flag);
3110   locked_verify_free_chunks_count();
3111 #endif
3112 }
3113 
3114 void ChunkManager::verify() {
3115   MutexLockerEx cl(MetaspaceExpand_lock,
3116                      Mutex::_no_safepoint_check_flag);
3117   locked_verify();
3118 }
3119 
3120 void ChunkManager::locked_verify() {
3121   locked_verify_free_chunks_count();
3122   locked_verify_free_chunks_total();
3123   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3124     ChunkList* list = free_chunks(i);
3125     if (list != NULL) {
3126       Metachunk* chunk = list->head();
3127       while (chunk) {
3128         DEBUG_ONLY(do_verify_chunk(chunk);)
3129         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
3130         chunk = chunk->next();
3131       }
3132     }
3133   }
3134 }
3135 
3136 void ChunkManager::locked_print_free_chunks(outputStream* st) {
3137   assert_lock_strong(MetaspaceExpand_lock);
3138   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
3139                 _free_chunks_total, _free_chunks_count);
3140 }
3141 
3142 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
3143   assert_lock_strong(MetaspaceExpand_lock);
3144   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
3145                 sum_free_chunks(), sum_free_chunks_count());
3146 }
3147 
3148 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
3149   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
3150          "Bad index: %d", (int)index);
3151 
3152   return &_free_chunks[index];
3153 }
3154 
3155 // These methods that sum the free chunk lists are used in printing
3156 // methods that are used in product builds.
3157 size_t ChunkManager::sum_free_chunks() {
3158   assert_lock_strong(MetaspaceExpand_lock);
3159   size_t result = 0;
3160   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3161     ChunkList* list = free_chunks(i);
3162 
3163     if (list == NULL) {
3164       continue;
3165     }
3166 
3167     result = result + list->count() * list->size();
3168   }
3169   result = result + humongous_dictionary()->total_size();
3170   return result;
3171 }
3172 
3173 size_t ChunkManager::sum_free_chunks_count() {
3174   assert_lock_strong(MetaspaceExpand_lock);
3175   size_t count = 0;
3176   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3177     ChunkList* list = free_chunks(i);
3178     if (list == NULL) {
3179       continue;
3180     }
3181     count = count + list->count();
3182   }
3183   count = count + humongous_dictionary()->total_free_blocks();
3184   return count;
3185 }
3186 
3187 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
3188   ChunkIndex index = list_index(word_size);
3189   assert(index < HumongousIndex, "No humongous list");
3190   return free_chunks(index);
3191 }
3192 
3193 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
3194 // split up the larger chunk into n smaller chunks, at least one of which should be
3195 // the target chunk of target chunk size. The smaller chunks, including the target
3196 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
3197 // Note that this chunk is supposed to be removed from the freelist right away.
3198 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
3199   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
3200 
3201   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
3202   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
3203 
3204   MetaWord* const region_start = (MetaWord*)larger_chunk;
3205   const size_t region_word_len = larger_chunk->word_size();
3206   MetaWord* const region_end = region_start + region_word_len;
3207   VirtualSpaceNode* const vsn = larger_chunk->container();
3208   OccupancyMap* const ocmap = vsn->occupancy_map();
3209 
3210   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
3211   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
3212   // at an address suitable to place the smaller target chunk.
3213   assert_is_aligned(region_start, target_chunk_word_size);
3214 
3215   // Remove old chunk.
3216   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
3217   larger_chunk->remove_sentinel();
3218 
3219   // Prevent access to the old chunk from here on.
3220   larger_chunk = NULL;
3221   // ... and wipe it.
3222   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
3223 
3224   // In its place create first the target chunk...
3225   MetaWord* p = region_start;
3226   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
3227   assert(target_chunk == (Metachunk*)p, "Sanity");
3228   target_chunk->set_origin(origin_split);
3229 
3230   // Note: we do not need to mark its start in the occupancy map
3231   // because it coincides with the old chunk start.
3232 
3233   // Mark chunk as free and return to the freelist.
3234   do_update_in_use_info_for_chunk(target_chunk, false);
3235   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
3236 
3237   // This chunk should now be valid and can be verified.
3238   DEBUG_ONLY(do_verify_chunk(target_chunk));
3239 
3240   // In the remaining space create the remainder chunks.
3241   p += target_chunk->word_size();
3242   assert(p < region_end, "Sanity");
3243 
3244   while (p < region_end) {
3245 
3246     // Find the largest chunk size which fits the alignment requirements at address p.
3247     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
3248     size_t this_chunk_word_size = 0;
3249     for(;;) {
3250       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
3251       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
3252         break;
3253       } else {
3254         this_chunk_index = prev_chunk_index(this_chunk_index);
3255         assert(this_chunk_index >= target_chunk_index, "Sanity");
3256       }
3257     }
3258 
3259     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
3260     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
3261     assert(p + this_chunk_word_size <= region_end, "Sanity");
3262 
3263     // Create splitting chunk.
3264     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
3265     assert(this_chunk == (Metachunk*)p, "Sanity");
3266     this_chunk->set_origin(origin_split);
3267     ocmap->set_chunk_starts_at_address(p, true);
3268     do_update_in_use_info_for_chunk(this_chunk, false);
3269 
3270     // This chunk should be valid and can be verified.
3271     DEBUG_ONLY(do_verify_chunk(this_chunk));
3272 
3273     // Return this chunk to freelist and correct counter.
3274     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
3275     _free_chunks_count ++;
3276 
3277     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
3278       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
3279       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
3280       p2i(region_start), p2i(region_end));
3281 
3282     p += this_chunk_word_size;
3283 
3284   }
3285 
3286   return target_chunk;
3287 }
3288 
3289 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
3290   assert_lock_strong(MetaspaceExpand_lock);
3291 
3292   slow_locked_verify();
3293 
3294   Metachunk* chunk = NULL;
3295   bool we_did_split_a_chunk = false;
3296 
3297   if (list_index(word_size) != HumongousIndex) {
3298 
3299     ChunkList* free_list = find_free_chunks_list(word_size);
3300     assert(free_list != NULL, "Sanity check");
3301 
3302     chunk = free_list->head();
3303 
3304     if (chunk == NULL) {
3305       // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
3306       // This is the counterpart of the coalescing-upon-chunk-return.
3307 
3308       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
3309 
3310       // Is there a larger chunk we could split?
3311       Metachunk* larger_chunk = NULL;
3312       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
3313       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
3314         larger_chunk = free_chunks(larger_chunk_index)->head();
3315         if (larger_chunk == NULL) {
3316           larger_chunk_index = next_chunk_index(larger_chunk_index);
3317         }
3318       }
3319 
3320       if (larger_chunk != NULL) {
3321         assert(larger_chunk->word_size() > word_size, "Sanity");
3322         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
3323 
3324         // We found a larger chunk. Lets split it up:
3325         // - remove old chunk
3326         // - in its place, create new smaller chunks, with at least one chunk
3327         //   being of target size, the others sized as large as possible. This
3328         //   is to make sure the resulting chunks are "as coalesced as possible"
3329         //   (similar to VirtualSpaceNode::retire()).
3330         // Note: during this operation both ChunkManager and VirtualSpaceNode
3331         //  are temporarily invalid, so be careful with asserts.
3332 
3333         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
3334            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
3335           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
3336           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
3337 
3338         chunk = split_chunk(word_size, larger_chunk);
3339 
3340         // This should have worked.
3341         assert(chunk != NULL, "Sanity");
3342         assert(chunk->word_size() == word_size, "Sanity");
3343         assert(chunk->is_tagged_free(), "Sanity");
3344 
3345         we_did_split_a_chunk = true;
3346 
3347       }
3348     }
3349 
3350     if (chunk == NULL) {
3351       return NULL;
3352     }
3353 
3354     // Remove the chunk as the head of the list.
3355     free_list->remove_chunk(chunk);
3356 
3357     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
3358                                        p2i(free_list), free_list->count());
3359 
3360   } else {
3361     chunk = humongous_dictionary()->get_chunk(word_size);
3362 
3363     if (chunk == NULL) {
3364       return NULL;
3365     }
3366 
3367     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
3368                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
3369   }
3370 
3371   // Chunk has been removed from the chunk manager; update counters.
3372   account_for_removed_chunk(chunk);
3373   do_update_in_use_info_for_chunk(chunk, true);
3374   chunk->container()->inc_container_count();
3375   chunk->inc_use_count();
3376 
3377   // Remove it from the links to this freelist
3378   chunk->set_next(NULL);
3379   chunk->set_prev(NULL);
3380 
3381   // Run some verifications (some more if we did a chunk split)
3382 #ifdef ASSERT
3383   if (VerifyMetaspace) {
3384     locked_verify();
3385     VirtualSpaceNode* const vsn = chunk->container();
3386     vsn->verify();
3387     if (we_did_split_a_chunk) {
3388       vsn->verify_free_chunks_are_ideally_merged();
3389     }
3390   }
3391 #endif
3392 
3393   return chunk;
3394 }
3395 
3396 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
3397   assert_lock_strong(MetaspaceExpand_lock);
3398   slow_locked_verify();
3399 
3400   // Take from the beginning of the list
3401   Metachunk* chunk = free_chunks_get(word_size);
3402   if (chunk == NULL) {
3403     return NULL;
3404   }
3405 
3406   assert((word_size <= chunk->word_size()) ||
3407          (list_index(chunk->word_size()) == HumongousIndex),
3408          "Non-humongous variable sized chunk");
3409   LogTarget(Debug, gc, metaspace, freelist) lt;
3410   if (lt.is_enabled()) {
3411     size_t list_count;
3412     if (list_index(word_size) < HumongousIndex) {
3413       ChunkList* list = find_free_chunks_list(word_size);
3414       list_count = list->count();
3415     } else {
3416       list_count = humongous_dictionary()->total_count();
3417     }
3418     LogStream ls(lt);
3419     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3420              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3421     ResourceMark rm;
3422     locked_print_free_chunks(&ls);
3423   }
3424 
3425   return chunk;
3426 }
3427 
3428 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3429   assert_lock_strong(MetaspaceExpand_lock);
3430   DEBUG_ONLY(do_verify_chunk(chunk);)
3431   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3432   assert(chunk != NULL, "Expected chunk.");
3433   assert(chunk->container() != NULL, "Container should have been set.");
3434   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3435   index_bounds_check(index);
3436 
3437   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3438   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3439   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3440   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3441 
3442   if (index != HumongousIndex) {
3443     // Return non-humongous chunk to freelist.
3444     ChunkList* list = free_chunks(index);
3445     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3446     list->return_chunk_at_head(chunk);
3447     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3448         chunk_size_name(index), p2i(chunk));
3449   } else {
3450     // Return humongous chunk to dictionary.
3451     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3452     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3453            "Humongous chunk has wrong alignment.");
3454     _humongous_dictionary.return_chunk(chunk);
3455     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3456         chunk_size_name(index), p2i(chunk), chunk->word_size());
3457   }
3458   chunk->container()->dec_container_count();
3459   do_update_in_use_info_for_chunk(chunk, false);
3460 
3461   // Chunk has been added; update counters.
3462   account_for_added_chunk(chunk);
3463 
3464   // Attempt coalesce returned chunks with its neighboring chunks:
3465   // if this chunk is small or special, attempt to coalesce to a medium chunk.
3466   if (index == SmallIndex || index == SpecializedIndex) {
3467     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3468       // This did not work. But if this chunk is special, we still may form a small chunk?
3469       if (index == SpecializedIndex) {
3470         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3471           // give up.
3472         }
3473       }
3474     }
3475   }
3476 
3477 }
3478 
3479 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3480   index_bounds_check(index);
3481   if (chunks == NULL) {
3482     return;
3483   }
3484   LogTarget(Trace, gc, metaspace, freelist) log;
3485   if (log.is_enabled()) { // tracing
3486     log.print("returning list of %s chunks...", chunk_size_name(index));
3487   }
3488   unsigned num_chunks_returned = 0;
3489   size_t size_chunks_returned = 0;
3490   Metachunk* cur = chunks;
3491   while (cur != NULL) {
3492     // Capture the next link before it is changed
3493     // by the call to return_chunk_at_head();
3494     Metachunk* next = cur->next();
3495     if (log.is_enabled()) { // tracing
3496       num_chunks_returned ++;
3497       size_chunks_returned += cur->word_size();
3498     }
3499     return_single_chunk(index, cur);
3500     cur = next;
3501   }
3502   if (log.is_enabled()) { // tracing
3503     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3504         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3505     if (index != HumongousIndex) {
3506       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3507     } else {
3508       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3509     }
3510   }
3511 }
3512 
3513 void ChunkManager::print_on(outputStream* out) const {
3514   _humongous_dictionary.report_statistics(out);
3515 }
3516 
3517 void ChunkManager::get_statistics(ChunkManagerStatistics* out) const {
3518   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3519   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3520     out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
3521   }
3522 }
3523 
3524 // SpaceManager methods
3525 
3526 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3527   size_t chunk_sizes[] = {
3528       specialized_chunk_size(is_class_space),
3529       small_chunk_size(is_class_space),
3530       medium_chunk_size(is_class_space)
3531   };
3532 
3533   // Adjust up to one of the fixed chunk sizes ...
3534   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3535     if (requested <= chunk_sizes[i]) {
3536       return chunk_sizes[i];
3537     }
3538   }
3539 
3540   // ... or return the size as a humongous chunk.
3541   return requested;
3542 }
3543 
3544 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3545   return adjust_initial_chunk_size(requested, is_class());
3546 }
3547 
3548 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3549   size_t requested;
3550 
3551   if (is_class()) {
3552     switch (type) {
3553     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3554     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3555     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3556     default:                                 requested = ClassSmallChunk; break;
3557     }
3558   } else {
3559     switch (type) {
3560     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3561     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3562     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3563     default:                                 requested = SmallChunk; break;
3564     }
3565   }
3566 
3567   // Adjust to one of the fixed chunk sizes (unless humongous)
3568   const size_t adjusted = adjust_initial_chunk_size(requested);
3569 
3570   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3571          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3572 
3573   return adjusted;
3574 }
3575 
3576 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3577   size_t count = 0;
3578   Metachunk* chunk = chunks_in_use(i);
3579   while (chunk != NULL) {
3580     count++;
3581     chunk = chunk->next();
3582   }
3583   return count;
3584 }
3585 
3586 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3587 
3588   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3589     Metachunk* chunk = chunks_in_use(i);
3590     st->print("SpaceManager: %s " PTR_FORMAT,
3591                  chunk_size_name(i), p2i(chunk));
3592     if (chunk != NULL) {
3593       st->print_cr(" free " SIZE_FORMAT,
3594                    chunk->free_word_size());
3595     } else {
3596       st->cr();
3597     }
3598   }
3599 
3600   chunk_manager()->locked_print_free_chunks(st);
3601   chunk_manager()->locked_print_sum_free_chunks(st);
3602 }
3603 
3604 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3605 
3606   // Decide between a small chunk and a medium chunk.  Up to
3607   // _small_chunk_limit small chunks can be allocated.
3608   // After that a medium chunk is preferred.
3609   size_t chunk_word_size;
3610 
3611   // Special case for anonymous metadata space.
3612   // Anonymous metadata space is usually small, with majority within 1K - 2K range and
3613   // rarely about 4K (64-bits JVM).
3614   // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
3615   // from SpecializeChunk up to _anon_or_delegating_metadata_specialize_chunk_limit (4)
3616   // reduces space waste from 60+% to around 30%.
3617   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3618       _mdtype == Metaspace::NonClassType &&
3619       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3620       word_size + Metachunk::overhead() <= SpecializedChunk) {
3621     return SpecializedChunk;
3622   }
3623 
3624   if (chunks_in_use(MediumIndex) == NULL &&
3625       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3626     chunk_word_size = (size_t) small_chunk_size();
3627     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3628       chunk_word_size = medium_chunk_size();
3629     }
3630   } else {
3631     chunk_word_size = medium_chunk_size();
3632   }
3633 
3634   // Might still need a humongous chunk.  Enforce
3635   // humongous allocations sizes to be aligned up to
3636   // the smallest chunk size.
3637   size_t if_humongous_sized_chunk =
3638     align_up(word_size + Metachunk::overhead(),
3639                   smallest_chunk_size());
3640   chunk_word_size =
3641     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3642 
3643   assert(!SpaceManager::is_humongous(word_size) ||
3644          chunk_word_size == if_humongous_sized_chunk,
3645          "Size calculation is wrong, word_size " SIZE_FORMAT
3646          " chunk_word_size " SIZE_FORMAT,
3647          word_size, chunk_word_size);
3648   Log(gc, metaspace, alloc) log;
3649   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3650     log.debug("Metadata humongous allocation:");
3651     log.debug("  word_size " PTR_FORMAT, word_size);
3652     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3653     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3654   }
3655   return chunk_word_size;
3656 }
3657 
3658 void SpaceManager::track_metaspace_memory_usage() {
3659   if (is_init_completed()) {
3660     if (is_class()) {
3661       MemoryService::track_compressed_class_memory_usage();
3662     }
3663     MemoryService::track_metaspace_memory_usage();
3664   }
3665 }
3666 
3667 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3668   assert(vs_list()->current_virtual_space() != NULL,
3669          "Should have been set");
3670   assert(current_chunk() == NULL ||
3671          current_chunk()->allocate(word_size) == NULL,
3672          "Don't need to expand");
3673   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3674 
3675   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3676     size_t words_left = 0;
3677     size_t words_used = 0;
3678     if (current_chunk() != NULL) {
3679       words_left = current_chunk()->free_word_size();
3680       words_used = current_chunk()->used_word_size();
3681     }
3682     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3683                                        word_size, words_used, words_left);
3684   }
3685 
3686   // Get another chunk
3687   size_t chunk_word_size = calc_chunk_size(word_size);
3688   Metachunk* next = get_new_chunk(chunk_word_size);
3689 
3690   MetaWord* mem = NULL;
3691 
3692   // If a chunk was available, add it to the in-use chunk list
3693   // and do an allocation from it.
3694   if (next != NULL) {
3695     // Add to this manager's list of chunks in use.
3696     add_chunk(next, false);
3697     mem = next->allocate(word_size);
3698   }
3699 
3700   // Track metaspace memory usage statistic.
3701   track_metaspace_memory_usage();
3702 
3703   return mem;
3704 }
3705 
3706 void SpaceManager::print_on(outputStream* st) const {
3707   SpaceManagerStatistics stat;
3708   add_to_statistics(&stat); // will lock _lock.
3709   stat.print_on(st, 1*K, false);
3710 }
3711 
3712 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3713                            Metaspace::MetaspaceType space_type,
3714                            Mutex* lock) :
3715   _mdtype(mdtype),
3716   _space_type(space_type),
3717   _allocated_block_words(0),
3718   _allocated_chunks_words(0),
3719   _allocated_chunks_count(0),
3720   _block_freelists(NULL),
3721   _lock(lock)
3722 {
3723   initialize();
3724 }
3725 
3726 void SpaceManager::inc_size_metrics(size_t words) {
3727   assert_lock_strong(MetaspaceExpand_lock);
3728   // Total of allocated Metachunks and allocated Metachunks count
3729   // for each SpaceManager
3730   _allocated_chunks_words = _allocated_chunks_words + words;
3731   _allocated_chunks_count++;
3732 
3733   // Global total of capacity in allocated Metachunks
3734   MetaspaceUtils::inc_capacity(mdtype(), words);
3735   // Global total of allocated Metablocks.
3736   // used_words_slow() includes the overhead in each
3737   // Metachunk so include it in the used when the
3738   // Metachunk is first added (so only added once per
3739   // Metachunk).
3740   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3741 }
3742 
3743 void SpaceManager::inc_used_metrics(size_t words) {
3744   // Add to the per SpaceManager total
3745   Atomic::add(words, &_allocated_block_words);
3746   // Add to the global total
3747   MetaspaceUtils::inc_used(mdtype(), words);
3748 }
3749 
3750 void SpaceManager::dec_total_from_size_metrics() {
3751   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3752   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3753 }
3754 
3755 void SpaceManager::initialize() {
3756   Metadebug::init_allocation_fail_alot_count();
3757   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3758     _chunks_in_use[i] = NULL;
3759   }
3760   _current_chunk = NULL;
3761   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3762 }
3763 
3764 SpaceManager::~SpaceManager() {
3765 
3766   // This call this->_lock which can't be done while holding MetaspaceExpand_lock
3767   DEBUG_ONLY(verify_metrics());
3768 
3769   MutexLockerEx fcl(MetaspaceExpand_lock,
3770                     Mutex::_no_safepoint_check_flag);
3771 
3772   chunk_manager()->slow_locked_verify();
3773 
3774   dec_total_from_size_metrics();
3775 
3776   Log(gc, metaspace, freelist) log;
3777   if (log.is_trace()) {
3778     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3779     ResourceMark rm;
3780     LogStream ls(log.trace());
3781     locked_print_chunks_in_use_on(&ls);
3782     if (block_freelists() != NULL) {
3783       block_freelists()->print_on(&ls);
3784     }
3785   }
3786 
3787   // Add all the chunks in use by this space manager
3788   // to the global list of free chunks.
3789 
3790   // Follow each list of chunks-in-use and add them to the
3791   // free lists.  Each list is NULL terminated.
3792 
3793   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3794     Metachunk* chunks = chunks_in_use(i);
3795     chunk_manager()->return_chunk_list(i, chunks);
3796     set_chunks_in_use(i, NULL);
3797   }
3798 
3799   chunk_manager()->slow_locked_verify();
3800 
3801   if (_block_freelists != NULL) {
3802     delete _block_freelists;
3803   }
3804 }
3805 
3806 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3807   assert_lock_strong(lock());
3808   // Allocations and deallocations are in raw_word_size
3809   size_t raw_word_size = get_allocation_word_size(word_size);
3810   // Lazily create a block_freelist
3811   if (block_freelists() == NULL) {
3812     _block_freelists = new BlockFreelist();
3813   }
3814   block_freelists()->return_block(p, raw_word_size);
3815 }
3816 
3817 // Adds a chunk to the list of chunks in use.
3818 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3819 
3820   assert(new_chunk != NULL, "Should not be NULL");
3821   assert(new_chunk->next() == NULL, "Should not be on a list");
3822 
3823   new_chunk->reset_empty();
3824 
3825   // Find the correct list and and set the current
3826   // chunk for that list.
3827   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3828 
3829   if (index != HumongousIndex) {
3830     retire_current_chunk();
3831     set_current_chunk(new_chunk);
3832     new_chunk->set_next(chunks_in_use(index));
3833     set_chunks_in_use(index, new_chunk);
3834   } else {
3835     // For null class loader data and DumpSharedSpaces, the first chunk isn't
3836     // small, so small will be null.  Link this first chunk as the current
3837     // chunk.
3838     if (make_current) {
3839       // Set as the current chunk but otherwise treat as a humongous chunk.
3840       set_current_chunk(new_chunk);
3841     }
3842     // Link at head.  The _current_chunk only points to a humongous chunk for
3843     // the null class loader metaspace (class and data virtual space managers)
3844     // any humongous chunks so will not point to the tail
3845     // of the humongous chunks list.
3846     new_chunk->set_next(chunks_in_use(HumongousIndex));
3847     set_chunks_in_use(HumongousIndex, new_chunk);
3848 
3849     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3850   }
3851 
3852   // Add to the running sum of capacity
3853   inc_size_metrics(new_chunk->word_size());
3854 
3855   assert(new_chunk->is_empty(), "Not ready for reuse");
3856   Log(gc, metaspace, freelist) log;
3857   if (log.is_trace()) {
3858     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", _allocated_chunks_count);
3859     ResourceMark rm;
3860     LogStream ls(log.trace());
3861     new_chunk->print_on(&ls);
3862     chunk_manager()->locked_print_free_chunks(&ls);
3863   }
3864 }
3865 
3866 void SpaceManager::retire_current_chunk() {
3867   if (current_chunk() != NULL) {
3868     size_t remaining_words = current_chunk()->free_word_size();
3869     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3870       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3871       deallocate(ptr, remaining_words);
3872       inc_used_metrics(remaining_words);
3873     }
3874   }
3875 }
3876 
3877 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3878   // Get a chunk from the chunk freelist
3879   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3880 
3881   if (next == NULL) {
3882     next = vs_list()->get_new_chunk(chunk_word_size,
3883                                     medium_chunk_bunch());
3884   }
3885 
3886   Log(gc, metaspace, alloc) log;
3887   if (log.is_debug() && next != NULL &&
3888       SpaceManager::is_humongous(next->word_size())) {
3889     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3890   }
3891 
3892   return next;
3893 }
3894 
3895 MetaWord* SpaceManager::allocate(size_t word_size) {
3896   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3897   size_t raw_word_size = get_allocation_word_size(word_size);
3898   BlockFreelist* fl =  block_freelists();
3899   MetaWord* p = NULL;
3900   // Allocation from the dictionary is expensive in the sense that
3901   // the dictionary has to be searched for a size.  Don't allocate
3902   // from the dictionary until it starts to get fat.  Is this
3903   // a reasonable policy?  Maybe an skinny dictionary is fast enough
3904   // for allocations.  Do some profiling.  JJJ
3905   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3906     p = fl->get_block(raw_word_size);
3907     if (p != NULL) {
3908       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
3909     }
3910   }
3911   if (p == NULL) {
3912     p = allocate_work(raw_word_size);
3913   }
3914 
3915   return p;
3916 }
3917 
3918 // Returns the address of spaced allocated for "word_size".
3919 // This methods does not know about blocks (Metablocks)
3920 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3921   assert_lock_strong(lock());
3922 #ifdef ASSERT
3923   if (Metadebug::test_metadata_failure()) {
3924     return NULL;
3925   }
3926 #endif
3927   // Is there space in the current chunk?
3928   MetaWord* result = NULL;
3929 
3930   if (current_chunk() != NULL) {
3931     result = current_chunk()->allocate(word_size);
3932   }
3933 
3934   if (result == NULL) {
3935     result = grow_and_allocate(word_size);
3936   }
3937 
3938   if (result != NULL) {
3939     inc_used_metrics(word_size);
3940     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3941            "Head of the list is being allocated");
3942   }
3943 
3944   return result;
3945 }
3946 
3947 void SpaceManager::verify() {
3948   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3949     Metachunk* curr = chunks_in_use(i);
3950     while (curr != NULL) {
3951       DEBUG_ONLY(do_verify_chunk(curr);)
3952       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3953       curr = curr->next();
3954     }
3955   }
3956 }
3957 
3958 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3959   assert(is_humongous(chunk->word_size()) ||
3960          chunk->word_size() == medium_chunk_size() ||
3961          chunk->word_size() == small_chunk_size() ||
3962          chunk->word_size() == specialized_chunk_size(),
3963          "Chunk size is wrong");
3964   return;
3965 }
3966 
3967 void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
3968   assert_lock_strong(lock());
3969   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3970     UsedChunksStatistics& chunk_stat = out->chunk_stats(i);
3971     Metachunk* chunk = chunks_in_use(i);
3972     while (chunk != NULL) {
3973       chunk_stat.add_num(1);
3974       chunk_stat.add_cap(chunk->word_size());
3975       chunk_stat.add_used(chunk->used_word_size());
3976       if (chunk != current_chunk()) {
3977         chunk_stat.add_waste(chunk->free_word_size());
3978       } else {
3979         chunk_stat.add_free(chunk->free_word_size());
3980       }
3981       chunk = chunk->next();
3982     }
3983   }
3984   if (block_freelists() != NULL) {
3985     out->add_free_blocks(block_freelists()->num_blocks(), block_freelists()->total_size());
3986   }
3987 }
3988 
3989 void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
3990   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3991   add_to_statistics_locked(out);
3992 }
3993 
3994 #ifdef ASSERT
3995 void SpaceManager::verify_metrics_locked() const {
3996   assert_lock_strong(lock());
3997 
3998   SpaceManagerStatistics stat;
3999   add_to_statistics_locked(&stat);
4000 
4001   UsedChunksStatistics chunk_stats = stat.totals();
4002 
4003   assert_counter(_allocated_block_words, chunk_stats.used(), "SpaceManager::_allocated_blocks_words");
4004   assert_counter(_allocated_chunks_words, chunk_stats.cap(), "SpaceManager::_allocated_chunks_words");
4005   assert_counter(_allocated_chunks_count, chunk_stats.num(), "SpaceManager::_allocated_chunks_count");
4006 }
4007 
4008 void SpaceManager::verify_metrics() const {
4009   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
4010   verify_metrics_locked();
4011 }
4012 #endif // ASSERT
4013 
4014 
4015 // MetaspaceUtils
4016 
4017 
4018 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
4019 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
4020 
4021 
4022 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
4023 // output will be the accumulated values for all live metaspaces.
4024 // Note: method does not do any locking.
4025 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
4026   out->reset();
4027   ClassLoaderDataGraphMetaspaceIterator iter;
4028    while (iter.repeat()) {
4029      ClassLoaderMetaspace* msp = iter.get_next();
4030      if (msp != NULL) {
4031        msp->add_to_statistics(out);
4032      }
4033    }
4034 }
4035 
4036 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
4037   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
4038   return list == NULL ? 0 : list->free_bytes();
4039 }
4040 
4041 size_t MetaspaceUtils::free_bytes() {
4042   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
4043 }
4044 
4045 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
4046   assert_lock_strong(MetaspaceExpand_lock);
4047   assert(words <= capacity_words(mdtype),
4048          "About to decrement below 0: words " SIZE_FORMAT
4049          " is greater than _capacity_words[%u] " SIZE_FORMAT,
4050          words, mdtype, capacity_words(mdtype));
4051 
4052   _capacity_words[mdtype] -= words;
4053 }
4054 
4055 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
4056   assert_lock_strong(MetaspaceExpand_lock);
4057   // Needs to be atomic
4058   _capacity_words[mdtype] += words;
4059 }
4060 
4061 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
4062   assert(words <= used_words(mdtype),
4063          "About to decrement below 0: words " SIZE_FORMAT
4064          " is greater than _used_words[%u] " SIZE_FORMAT,
4065          words, mdtype, used_words(mdtype));
4066   // For CMS deallocation of the Metaspaces occurs during the
4067   // sweep which is a concurrent phase.  Protection by the MetaspaceExpand_lock
4068   // is not enough since allocation is on a per Metaspace basis
4069   // and protected by the Metaspace lock.
4070   Atomic::sub(words, &_used_words[mdtype]);
4071 }
4072 
4073 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
4074   // _used_words tracks allocations for
4075   // each piece of metadata.  Those allocations are
4076   // generally done concurrently by different application
4077   // threads so must be done atomically.
4078   Atomic::add(words, &_used_words[mdtype]);
4079 }
4080 
4081 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
4082   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
4083   return list == NULL ? 0 : list->reserved_bytes();
4084 }
4085 
4086 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
4087   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
4088   return list == NULL ? 0 : list->committed_bytes();
4089 }
4090 
4091 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
4092 
4093 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
4094   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
4095   if (chunk_manager == NULL) {
4096     return 0;
4097   }
4098   chunk_manager->slow_verify();
4099   return chunk_manager->free_chunks_total_words();
4100 }
4101 
4102 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
4103   return free_chunks_total_words(mdtype) * BytesPerWord;
4104 }
4105 
4106 size_t MetaspaceUtils::free_chunks_total_words() {
4107   return free_chunks_total_words(Metaspace::ClassType) +
4108          free_chunks_total_words(Metaspace::NonClassType);
4109 }
4110 
4111 size_t MetaspaceUtils::free_chunks_total_bytes() {
4112   return free_chunks_total_words() * BytesPerWord;
4113 }
4114 
4115 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
4116   return Metaspace::get_chunk_manager(mdtype) != NULL;
4117 }
4118 
4119 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
4120   if (!has_chunk_free_list(mdtype)) {
4121     return MetaspaceChunkFreeListSummary();
4122   }
4123 
4124   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
4125   return cm->chunk_free_list_summary();
4126 }
4127 
4128 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
4129   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
4130                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
4131 }
4132 
4133 void MetaspaceUtils::print_on(outputStream* out) {
4134   Metaspace::MetadataType nct = Metaspace::NonClassType;
4135 
4136   out->print_cr(" Metaspace       "
4137                 "used "      SIZE_FORMAT "K, "
4138                 "capacity "  SIZE_FORMAT "K, "
4139                 "committed " SIZE_FORMAT "K, "
4140                 "reserved "  SIZE_FORMAT "K",
4141                 used_bytes()/K,
4142                 capacity_bytes()/K,
4143                 committed_bytes()/K,
4144                 reserved_bytes()/K);
4145 
4146   if (Metaspace::using_class_space()) {
4147     Metaspace::MetadataType ct = Metaspace::ClassType;
4148     out->print_cr("  class space    "
4149                   "used "      SIZE_FORMAT "K, "
4150                   "capacity "  SIZE_FORMAT "K, "
4151                   "committed " SIZE_FORMAT "K, "
4152                   "reserved "  SIZE_FORMAT "K",
4153                   used_bytes(ct)/K,
4154                   capacity_bytes(ct)/K,
4155                   committed_bytes(ct)/K,
4156                   reserved_bytes(ct)/K);
4157   }
4158 }
4159 
4160 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4161 private:
4162   outputStream* const _out;
4163   const size_t        _scale;
4164   const bool          _do_print;
4165   const bool          _break_down_by_chunktype;
4166 
4167 public:
4168 
4169   uintx                           _num_loaders_with_metaspace;
4170   uintx                           _num_loaders_without_metaspace;
4171   ClassLoaderMetaspaceStatistics  _stats_total;
4172 
4173   uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
4174   ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];
4175 
4176 public:
4177   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
4178     : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
4179     , _num_loaders_with_metaspace(0)
4180     , _num_loaders_without_metaspace(0)
4181   {
4182     memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
4183   }
4184 
4185   void do_cld(ClassLoaderData* cld) {
4186 
4187     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4188 
4189     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4190     if (msp == NULL) {
4191       _num_loaders_without_metaspace ++;
4192       return;
4193     }
4194 
4195     // Collect statistics for this class loader metaspace
4196     ClassLoaderMetaspaceStatistics this_cld_stat;
4197     msp->add_to_statistics(&this_cld_stat);
4198 
4199     // And add it to the running totals
4200     _stats_total.add(this_cld_stat);
4201     _num_loaders_with_metaspace ++;
4202     _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
4203     _num_loaders_by_spacetype[msp->space_type()] ++;
4204 
4205     // Optionally, print.
4206     if (_do_print) {
4207 
4208       _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders_with_metaspace);
4209 
4210       if (cld->is_anonymous()) {
4211         _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));
4212       } else {
4213         ResourceMark rm;
4214         _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
4215       }
4216 
4217       if (msp->space_type() != Metaspace::StandardMetaspaceType) {
4218         _out->print(", %s loader", space_type_name(msp->space_type()));
4219       }
4220 
4221       if (cld->is_unloading()) {
4222         _out->print(", unloading");
4223       }
4224 
4225       _out->cr();
4226       this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
4227       _out->cr();
4228 
4229     }
4230 
4231   } // do_cld
4232 
4233 };
4234 
4235 void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
4236 
4237   const bool print_loaders = (flags & rf_show_loaders) > 0;
4238   const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
4239   const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
4240   bool have_detailed_cl_data = false;
4241 
4242   // Some report options require walking the class loader data graph.
4243   PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_by_chunktype);
4244   if (print_loaders) {
4245     out->cr();
4246     out->print_cr("Usage per loader:");
4247     out->cr();
4248   }
4249   if (print_loaders || print_by_chunktype || print_by_spacetype) {
4250     ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print
4251     have_detailed_cl_data = true;
4252   }
4253 
4254   // Print totals, broken up by space type.
4255   if (print_by_spacetype) {
4256     out->cr();
4257     out->print_cr("Usage per space type:");
4258     out->cr();
4259     for (int space_type = (int)Metaspace::ZeroMetaspaceType;
4260          space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
4261     {
4262       uintx num = cl._num_loaders_by_spacetype[space_type];
4263       out->print("%s: " UINTX_FORMAT " spaces%c",
4264         space_type_name((Metaspace::MetaspaceType)space_type),
4265         num, num > 0 ? ':' : '.');
4266       if (num > 0) {
4267         out->cr();
4268         cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
4269       }
4270       out->cr();
4271     }
4272   }
4273 
4274   // Print totals for in-use data:
4275   out->cr();
4276   out->print_cr("Total Usage:");
4277   out->cr();
4278 
4279   if (have_detailed_cl_data) {
4280     out->print_cr(UINTX_FORMAT " loaders (" UINTX_FORMAT " without metaspace)",
4281         cl._num_loaders_with_metaspace + cl._num_loaders_without_metaspace, cl._num_loaders_without_metaspace);
4282     out->cr();
4283     cl._stats_total.print_on(out, scale, print_by_chunktype);
4284   } else {
4285     // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
4286     // MetaspaceUtils.
4287     const size_t cap_nonclass = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
4288     const size_t used_nonclass = MetaspaceUtils::used_words(Metaspace::NonClassType);
4289     const size_t free_and_waste_nonclass = cap_nonclass - used_nonclass;
4290     if (Metaspace::using_class_space()) {
4291       out->print_cr("  Non-class space:");
4292     }
4293     print_scaled_words(out, cap_nonclass, scale, 6);
4294     out->print(" capacity, ");
4295     print_scaled_words_and_percentage(out, used_nonclass, cap_nonclass, scale, 6);
4296     out->print(" used, ");
4297     print_scaled_words_and_percentage(out, free_and_waste_nonclass, cap_nonclass, scale, 6);
4298     out->print(" free+waste. ");
4299 
4300     if (Metaspace::using_class_space()) {
4301       out->print_cr("      Class space:");
4302       const size_t cap_class = MetaspaceUtils::capacity_words(Metaspace::ClassType);
4303       const size_t used_class = MetaspaceUtils::used_words(Metaspace::ClassType);
4304       const size_t free_and_waste_class = cap_class - used_class;
4305       print_scaled_words(out, cap_class, scale, 6);
4306       out->print(" capacity, ");
4307       print_scaled_words_and_percentage(out, used_class, cap_class, scale, 6);
4308       out->print(" used, ");
4309       print_scaled_words_and_percentage(out, free_and_waste_class, cap_class, scale, 6);
4310       out->print(" free+waste. ");
4311 
4312       out->print_cr("            Total:");
4313       const size_t cap = cap_nonclass + cap_class;
4314       const size_t used = used_nonclass + used_class;
4315       const size_t free_and_waste = free_and_waste_nonclass + free_and_waste_class;
4316       print_scaled_words(out, cap, scale, 6);
4317       out->print(" capacity, ");
4318       print_scaled_words_and_percentage(out, used, cap, scale, 6);
4319       out->print(" used, ");
4320       print_scaled_words_and_percentage(out, free_and_waste, cap, scale, 6);
4321       out->print(" free+waste. ");
4322     }
4323     out->cr();
4324   }
4325 
4326   // -- Print Virtual space.
4327   out->cr();
4328   out->print_cr("Virtual Space:");
4329   out->cr();
4330   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
4331   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
4332   const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
4333   const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
4334   const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
4335   const size_t committed_words = committed_nonclass_words + committed_class_words;
4336   {
4337     if (Metaspace::using_class_space()) {
4338       out->print_cr("  Non-class space:");
4339     }
4340     print_scaled_words(out, reserved_nonclass_words, scale, 7);
4341     out->print(" reserved, ");
4342     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
4343     out->print(" committed ");
4344 
4345     if (Metaspace::using_class_space()) {
4346       out->print_cr("      Class space:");
4347       print_scaled_words(out, reserved_nonclass_words, scale, 7);
4348       out->print(" reserved, ");
4349       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
4350       out->print(" committed ");
4351 
4352       out->print_cr("            Total:");
4353       print_scaled_words(out, reserved_words, scale, 7);
4354       out->print(" reserved, ");
4355       print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
4356       out->print(" committed ");
4357     }
4358   }
4359   out->cr();
4360 
4361   // -- Print VirtualSpaceList details.
4362   if ((flags & rf_show_vslist) > 0) {
4363     out->cr();
4364     out->print_cr("Virtual Space List%s:", Metaspace::using_class_space() ? "s" : "");
4365     out->cr();
4366     if (Metaspace::using_class_space()) {
4367       out->print_cr("   Non-Class:");
4368     }
4369     Metaspace::space_list()->print_on(out, scale);
4370     if (Metaspace::using_class_space()) {
4371       out->print_cr("       Class:");
4372       Metaspace::class_space_list()->print_on(out, scale);
4373     }
4374   }
4375   out->cr();
4376 
4377   // -- Print VirtualSpaceList map.
4378   if ((flags & rf_show_vsmap) > 0) {
4379     out->cr();
4380     out->print_cr("Virtual Space Map:");
4381     out->cr();
4382     if (Metaspace::using_class_space()) {
4383       out->print_cr("   Non-Class:");
4384     }
4385     Metaspace::space_list()->print_map(out);
4386     if (Metaspace::using_class_space()) {
4387       out->print_cr("       Class:");
4388       Metaspace::class_space_list()->print_map(out);
4389     }
4390   }
4391   out->cr();
4392 
4393   // -- Print Freelists (ChunkManager) details
4394   out->cr();
4395   out->print("Free Chunk List%s:", Metaspace::using_class_space() ? "s" : "");
4396   out->cr();
4397 
4398   if ((flags & rf_show_chunk_freelist) > 0) {
4399     ChunkManagerStatistics non_class_cm_stat;
4400     Metaspace::chunk_manager_metadata()->get_statistics(&non_class_cm_stat);
4401     ChunkManagerStatistics class_cm_stat;
4402     Metaspace::chunk_manager_class()->get_statistics(&class_cm_stat);
4403 
4404     if (Metaspace::using_class_space()) {
4405       out->print_cr("   Non-Class:");
4406     }
4407     non_class_cm_stat.print_on(out, scale);
4408 
4409     if (Metaspace::using_class_space()) {
4410       out->print_cr("       Class:");
4411       class_cm_stat.print_on(out, scale);
4412     }
4413   } else {
4414     // In its basic form, report only capacity in free chunks, but take those numbers from the
4415     // running totals in the chunk managers to avoid locking.
4416     if (Metaspace::using_class_space()) {
4417       out->print_cr("   Non-Class:");
4418     }
4419     print_scaled_bytes(out, Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
4420     out->cr();
4421     if (Metaspace::using_class_space()) {
4422       out->print_cr("       Class:");
4423       print_scaled_bytes(out, Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
4424       out->cr();
4425     }
4426     out->cr();
4427   }
4428 
4429   // As a convenience, print a summary of common waste.
4430   out->cr();
4431   out->print_cr("Waste:");
4432   // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace.
4433   out->print("  (Percentages are of total committed metaspace size (");
4434   print_scaled_words(out, committed_words, scale);
4435   out->print_cr(")");
4436 
4437   // Print waste for in-use chunks.
4438   if (have_detailed_cl_data) {
4439     UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
4440     UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
4441     UsedChunksStatistics ucs_all;
4442     ucs_all.add(ucs_nonclass);
4443     ucs_all.add(ucs_class);
4444     out->print("Waste in chunks in use:         ");
4445     print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
4446     out->cr();
4447     out->print("Free in chunks in use:          ");
4448     print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
4449     out->cr();
4450   } else {
4451     // if we did not walk the CLDG, use the running numbers.
4452     size_t free_and_waste_words = MetaspaceUtils::capacity_words() - MetaspaceUtils::used_words();
4453     out->print("Free+Waste in chunks in use:    ");
4454     print_scaled_words_and_percentage(out, free_and_waste_words, committed_words, scale, 6);
4455     out->cr();
4456   }
4457 
4458   // Print waste in deallocated blocks.
4459   if (have_detailed_cl_data) {
4460     const uintx free_blocks_num =
4461         cl._stats_total.nonclass_sm_stats().free_blocks_num() +
4462         cl._stats_total.class_sm_stats().free_blocks_num();
4463     const size_t free_blocks_cap_words =
4464         cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
4465         cl._stats_total.class_sm_stats().free_blocks_cap_words();
4466     out->print("Deallocated from chunks in use: " UINTX_FORMAT " blocks, total size ", free_blocks_num);
4467     print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
4468     out->cr();
4469   }
4470 
4471   // Print waste in free chunks.
4472   {
4473     const size_t total_capacity_in_free_chunks =
4474         Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
4475         Metaspace::chunk_manager_class()->free_chunks_total_words();
4476     out->print("In free chunks:                 ");
4477     print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
4478     out->cr();
4479   }
4480 
4481   // Print internal statistics
4482 #ifdef ASSERT
4483   out->cr();
4484   out->cr();
4485   out->print_cr("Internal statistics:");
4486   out->cr();
4487   out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
4488   out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
4489   out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
4490   out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
4491   out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
4492   out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
4493   out->print_cr("Number of de-allocations: " UINTX_FORMAT ".", g_internal_statistics.num_deallocs);
4494   out->print_cr("Allocs statisfied from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
4495   out->cr();
4496 #endif
4497 
4498   // Print some interesting settings
4499   out->cr();
4500   out->cr();
4501   out->print("MaxMetaspaceSize:           ");
4502   print_scaled_bytes(out, MaxMetaspaceSize, scale);
4503   out->cr();
4504   out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
4505   out->cr();
4506   out->print("CompressedClassSpaceSize:   ");
4507   print_scaled_bytes(out, CompressedClassSpaceSize, scale);
4508 
4509   out->cr();
4510   out->cr();
4511 
4512 } // MetaspaceUtils::print_report()
4513 
4514 // Prints an ASCII representation of the given space.
4515 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4516   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4517   const bool for_class = mdtype == Metaspace::ClassType ? true : false;
4518   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4519   if (vsl != NULL) {
4520     if (for_class) {
4521       if (!Metaspace::using_class_space()) {
4522         out->print_cr("No Class Space.");
4523         return;
4524       }
4525       out->print_raw("---- Metaspace Map (Class Space) ----");
4526     } else {
4527       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4528     }
4529     // Print legend:
4530     out->cr();
4531     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4532     out->cr();
4533     VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4534     vsl->print_map(out);
4535     out->cr();
4536   }
4537 }
4538 
4539 void MetaspaceUtils::verify_free_chunks() {
4540   Metaspace::chunk_manager_metadata()->verify();
4541   if (Metaspace::using_class_space()) {
4542     Metaspace::chunk_manager_class()->verify();
4543   }
4544 }
4545 
4546 void MetaspaceUtils::verify_metrics() {
4547 #ifdef ASSERT
4548   // Please note: there are time windows where the internal counters are out of sync with
4549   // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
4550   // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
4551   // not be counted when iterating the CLDG. So be careful when you call this method.
4552   ClassLoaderMetaspaceStatistics total_stat;
4553   collect_statistics(&total_stat);
4554   UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
4555   UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
4556   bool mismatch =
4557       _capacity_words[Metaspace::NonClassType] != nonclass_chunk_stat.cap() ||
4558       _used_words[Metaspace::NonClassType] != nonclass_chunk_stat.used() ||
4559       _capacity_words[Metaspace::ClassType] != class_chunk_stat.cap() ||
4560       _used_words[Metaspace::ClassType] != class_chunk_stat.used();
4561   if (mismatch) {
4562     tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch.");
4563     tty->print_cr("Expected: non-class cap: " SIZE_FORMAT ", non-class used: " SIZE_FORMAT
4564                   ", class cap: " SIZE_FORMAT ", class used: " SIZE_FORMAT ".",
4565                   _capacity_words[Metaspace::NonClassType], _used_words[Metaspace::NonClassType],
4566                   _capacity_words[Metaspace::ClassType], _used_words[Metaspace::ClassType]);
4567     tty->print_cr("Got: non-class: ");
4568     nonclass_chunk_stat.print_on(tty, sizeof(MetaWord));
4569     tty->cr();
4570     tty->print_cr("         class: ");
4571     class_chunk_stat.print_on(tty, sizeof(MetaWord));
4572     tty->cr();
4573     tty->flush();
4574   }
4575   assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
4576 #endif
4577 }
4578 
4579 
4580 // Metaspace methods
4581 
4582 size_t Metaspace::_first_chunk_word_size = 0;
4583 size_t Metaspace::_first_class_chunk_word_size = 0;
4584 
4585 size_t Metaspace::_commit_alignment = 0;
4586 size_t Metaspace::_reserve_alignment = 0;
4587 
4588 VirtualSpaceList* Metaspace::_space_list = NULL;
4589 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4590 
4591 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4592 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4593 
4594 #define VIRTUALSPACEMULTIPLIER 2
4595 
4596 #ifdef _LP64
4597 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4598 
4599 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4600   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4601   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4602   // narrow_klass_base is the lower of the metaspace base and the cds base
4603   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4604   // between the lower base and higher address.
4605   address lower_base;
4606   address higher_address;
4607 #if INCLUDE_CDS
4608   if (UseSharedSpaces) {
4609     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4610                           (address)(metaspace_base + compressed_class_space_size()));
4611     lower_base = MIN2(metaspace_base, cds_base);
4612   } else
4613 #endif
4614   {
4615     higher_address = metaspace_base + compressed_class_space_size();
4616     lower_base = metaspace_base;
4617 
4618     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4619     // If compressed class space fits in lower 32G, we don't need a base.
4620     if (higher_address <= (address)klass_encoding_max) {
4621       lower_base = 0; // Effectively lower base is zero.
4622     }
4623   }
4624 
4625   Universe::set_narrow_klass_base(lower_base);
4626 
4627   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4628   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4629   // how dump time narrow_klass_shift is set. Although, CDS can work
4630   // with zero-shift mode also, to be consistent with AOT it uses
4631   // LogKlassAlignmentInBytes for klass shift so archived java heap objects
4632   // can be used at same time as AOT code.
4633   if (!UseSharedSpaces
4634       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4635     Universe::set_narrow_klass_shift(0);
4636   } else {
4637     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4638   }
4639   AOTLoader::set_narrow_klass_shift();
4640 }
4641 
4642 #if INCLUDE_CDS
4643 // Return TRUE if the specified metaspace_base and cds_base are close enough
4644 // to work with compressed klass pointers.
4645 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4646   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4647   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4648   address lower_base = MIN2((address)metaspace_base, cds_base);
4649   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4650                                 (address)(metaspace_base + compressed_class_space_size()));
4651   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4652 }
4653 #endif
4654 
4655 // Try to allocate the metaspace at the requested addr.
4656 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4657   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4658   assert(using_class_space(), "called improperly");
4659   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4660   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4661          "Metaspace size is too big");
4662   assert_is_aligned(requested_addr, _reserve_alignment);
4663   assert_is_aligned(cds_base, _reserve_alignment);
4664   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4665 
4666   // Don't use large pages for the class space.
4667   bool large_pages = false;
4668 
4669 #if !(defined(AARCH64) || defined(AIX))
4670   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4671                                              _reserve_alignment,
4672                                              large_pages,
4673                                              requested_addr);
4674 #else // AARCH64
4675   ReservedSpace metaspace_rs;
4676 
4677   // Our compressed klass pointers may fit nicely into the lower 32
4678   // bits.
4679   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4680     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4681                                  _reserve_alignment,
4682                                  large_pages,
4683                                  requested_addr);
4684   }
4685 
4686   if (! metaspace_rs.is_reserved()) {
4687     // Aarch64: Try to align metaspace so that we can decode a compressed
4688     // klass with a single MOVK instruction.  We can do this iff the
4689     // compressed class base is a multiple of 4G.
4690     // Aix: Search for a place where we can find memory. If we need to load
4691     // the base, 4G alignment is helpful, too.
4692     size_t increment = AARCH64_ONLY(4*)G;
4693     for (char *a = align_up(requested_addr, increment);
4694          a < (char*)(1024*G);
4695          a += increment) {
4696       if (a == (char *)(32*G)) {
4697         // Go faster from here on. Zero-based is no longer possible.
4698         increment = 4*G;
4699       }
4700 
4701 #if INCLUDE_CDS
4702       if (UseSharedSpaces
4703           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4704         // We failed to find an aligned base that will reach.  Fall
4705         // back to using our requested addr.
4706         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4707                                      _reserve_alignment,
4708                                      large_pages,
4709                                      requested_addr);
4710         break;
4711       }
4712 #endif
4713 
4714       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4715                                    _reserve_alignment,
4716                                    large_pages,
4717                                    a);
4718       if (metaspace_rs.is_reserved())
4719         break;
4720     }
4721   }
4722 
4723 #endif // AARCH64
4724 
4725   if (!metaspace_rs.is_reserved()) {
4726 #if INCLUDE_CDS
4727     if (UseSharedSpaces) {
4728       size_t increment = align_up(1*G, _reserve_alignment);
4729 
4730       // Keep trying to allocate the metaspace, increasing the requested_addr
4731       // by 1GB each time, until we reach an address that will no longer allow
4732       // use of CDS with compressed klass pointers.
4733       char *addr = requested_addr;
4734       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4735              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4736         addr = addr + increment;
4737         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4738                                      _reserve_alignment, large_pages, addr);
4739       }
4740     }
4741 #endif
4742     // If no successful allocation then try to allocate the space anywhere.  If
4743     // that fails then OOM doom.  At this point we cannot try allocating the
4744     // metaspace as if UseCompressedClassPointers is off because too much
4745     // initialization has happened that depends on UseCompressedClassPointers.
4746     // So, UseCompressedClassPointers cannot be turned off at this point.
4747     if (!metaspace_rs.is_reserved()) {
4748       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4749                                    _reserve_alignment, large_pages);
4750       if (!metaspace_rs.is_reserved()) {
4751         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4752                                               compressed_class_space_size()));
4753       }
4754     }
4755   }
4756 
4757   // If we got here then the metaspace got allocated.
4758   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4759 
4760 #if INCLUDE_CDS
4761   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4762   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4763     FileMapInfo::stop_sharing_and_unmap(
4764         "Could not allocate metaspace at a compatible address");
4765   }
4766 #endif
4767   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4768                                   UseSharedSpaces ? (address)cds_base : 0);
4769 
4770   initialize_class_space(metaspace_rs);
4771 
4772   LogTarget(Trace, gc, metaspace) lt;
4773   if (lt.is_enabled()) {
4774     ResourceMark rm;
4775     LogStream ls(lt);
4776     print_compressed_class_space(&ls, requested_addr);
4777   }
4778 }
4779 
4780 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4781   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4782                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4783   if (_class_space_list != NULL) {
4784     address base = (address)_class_space_list->current_virtual_space()->bottom();
4785     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4786                  compressed_class_space_size(), p2i(base));
4787     if (requested_addr != 0) {
4788       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4789     }
4790     st->cr();
4791   }
4792 }
4793 
4794 // For UseCompressedClassPointers the class space is reserved above the top of
4795 // the Java heap.  The argument passed in is at the base of the compressed space.
4796 void Metaspace::initialize_class_space(ReservedSpace rs) {
4797   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4798   assert(rs.size() >= CompressedClassSpaceSize,
4799          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4800   assert(using_class_space(), "Must be using class space");
4801   _class_space_list = new VirtualSpaceList(rs);
4802   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4803 
4804   if (!_class_space_list->initialization_succeeded()) {
4805     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4806   }
4807 }
4808 
4809 #endif
4810 
4811 void Metaspace::ergo_initialize() {
4812   if (DumpSharedSpaces) {
4813     // Using large pages when dumping the shared archive is currently not implemented.
4814     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4815   }
4816 
4817   size_t page_size = os::vm_page_size();
4818   if (UseLargePages && UseLargePagesInMetaspace) {
4819     page_size = os::large_page_size();
4820   }
4821 
4822   _commit_alignment  = page_size;
4823   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4824 
4825   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
4826   // override if MaxMetaspaceSize was set on the command line or not.
4827   // This information is needed later to conform to the specification of the
4828   // java.lang.management.MemoryUsage API.
4829   //
4830   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4831   // globals.hpp to the aligned value, but this is not possible, since the
4832   // alignment depends on other flags being parsed.
4833   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4834 
4835   if (MetaspaceSize > MaxMetaspaceSize) {
4836     MetaspaceSize = MaxMetaspaceSize;
4837   }
4838 
4839   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4840 
4841   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4842 
4843   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4844   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4845 
4846   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4847 
4848   // Initial virtual space size will be calculated at global_initialize()
4849   size_t min_metaspace_sz =
4850       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4851   if (UseCompressedClassPointers) {
4852     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4853       if (min_metaspace_sz >= MaxMetaspaceSize) {
4854         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4855       } else {
4856         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4857                       MaxMetaspaceSize - min_metaspace_sz);
4858       }
4859     }
4860   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4861     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4862                   min_metaspace_sz);
4863   }
4864 
4865   set_compressed_class_space_size(CompressedClassSpaceSize);
4866 }
4867 
4868 void Metaspace::global_initialize() {
4869   MetaspaceGC::initialize();
4870 
4871 #if INCLUDE_CDS
4872   if (DumpSharedSpaces) {
4873     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4874   } else if (UseSharedSpaces) {
4875     // If any of the archived space fails to map, UseSharedSpaces
4876     // is reset to false. Fall through to the
4877     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4878     // metaspace.
4879     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4880   }
4881 
4882   if (!DumpSharedSpaces && !UseSharedSpaces)
4883 #endif // INCLUDE_CDS
4884   {
4885 #ifdef _LP64
4886     if (using_class_space()) {
4887       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4888       allocate_metaspace_compressed_klass_ptrs(base, 0);
4889     }
4890 #endif // _LP64
4891   }
4892 
4893   // Initialize these before initializing the VirtualSpaceList
4894   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4895   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4896   // Make the first class chunk bigger than a medium chunk so it's not put
4897   // on the medium chunk list.   The next chunk will be small and progress
4898   // from there.  This size calculated by -version.
4899   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4900                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4901   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4902   // Arbitrarily set the initial virtual space to a multiple
4903   // of the boot class loader size.
4904   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4905   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4906 
4907   // Initialize the list of virtual spaces.
4908   _space_list = new VirtualSpaceList(word_size);
4909   _chunk_manager_metadata = new ChunkManager(false/*metaspace*/);
4910 
4911   if (!_space_list->initialization_succeeded()) {
4912     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4913   }
4914 
4915   _tracer = new MetaspaceTracer();
4916 }
4917 
4918 void Metaspace::post_initialize() {
4919   MetaspaceGC::post_initialize();
4920 }
4921 
4922 void Metaspace::verify_global_initialization() {
4923   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4924   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4925 
4926   if (using_class_space()) {
4927     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4928     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4929   }
4930 }
4931 
4932 size_t Metaspace::align_word_size_up(size_t word_size) {
4933   size_t byte_size = word_size * wordSize;
4934   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4935 }
4936 
4937 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4938                               MetaspaceObj::Type type, TRAPS) {
4939   assert(!_frozen, "sanity");
4940   if (HAS_PENDING_EXCEPTION) {
4941     assert(false, "Should not allocate with exception pending");
4942     return NULL;  // caller does a CHECK_NULL too
4943   }
4944 
4945   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4946         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4947 
4948   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4949 
4950   // Try to allocate metadata.
4951   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4952 
4953   if (result == NULL) {
4954     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4955 
4956     // Allocation failed.
4957     if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
4958       // Only start a GC if the bootstrapping has completed.
4959       // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
4960       // the VM thread.
4961 
4962       // Try to clean out some memory and retry.
4963       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4964     }
4965   }
4966 
4967   if (result == NULL) {
4968     if (DumpSharedSpaces) {
4969       // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
4970       // We should abort to avoid generating a potentially bad archive.
4971       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4972           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4973       tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
4974       vm_exit(1);
4975     }
4976     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4977   }
4978 
4979   // Zero initialize.
4980   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4981 
4982   return result;
4983 }
4984 
4985 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4986   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4987 
4988   // If result is still null, we are out of memory.
4989   Log(gc, metaspace, freelist) log;
4990   if (log.is_info()) {
4991     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4992              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4993     ResourceMark rm;
4994     if (log.is_debug()) {
4995       if (loader_data->metaspace_or_null() != NULL) {
4996         LogStream ls(log.debug());
4997         loader_data->print_value_on(&ls);
4998       }
4999     }
5000     LogStream ls(log.info());
5001     // In case of an OOM, log out a short but still useful report.
5002     MetaspaceUtils::print_report(&ls);
5003   }
5004 
5005   bool out_of_compressed_class_space = false;
5006   if (is_class_space_allocation(mdtype)) {
5007     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
5008     out_of_compressed_class_space =
5009       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
5010       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
5011       CompressedClassSpaceSize;
5012   }
5013 
5014   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
5015   const char* space_string = out_of_compressed_class_space ?
5016     "Compressed class space" : "Metaspace";
5017 
5018   report_java_out_of_memory(space_string);
5019 
5020   if (JvmtiExport::should_post_resource_exhausted()) {
5021     JvmtiExport::post_resource_exhausted(
5022         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
5023         space_string);
5024   }
5025 
5026   if (!is_init_completed()) {
5027     vm_exit_during_initialization("OutOfMemoryError", space_string);
5028   }
5029 
5030   if (out_of_compressed_class_space) {
5031     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
5032   } else {
5033     THROW_OOP(Universe::out_of_memory_error_metaspace());
5034   }
5035 }
5036 
5037 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
5038   switch (mdtype) {
5039     case Metaspace::ClassType: return "Class";
5040     case Metaspace::NonClassType: return "Metadata";
5041     default:
5042       assert(false, "Got bad mdtype: %d", (int) mdtype);
5043       return NULL;
5044   }
5045 }
5046 
5047 void Metaspace::purge(MetadataType mdtype) {
5048   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
5049 }
5050 
5051 void Metaspace::purge() {
5052   MutexLockerEx cl(MetaspaceExpand_lock,
5053                    Mutex::_no_safepoint_check_flag);
5054   purge(NonClassType);
5055   if (using_class_space()) {
5056     purge(ClassType);
5057   }
5058 }
5059 
5060 bool Metaspace::contains(const void* ptr) {
5061   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
5062     return true;
5063   }
5064   return contains_non_shared(ptr);
5065 }
5066 
5067 bool Metaspace::contains_non_shared(const void* ptr) {
5068   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
5069      return true;
5070   }
5071 
5072   return get_space_list(NonClassType)->contains(ptr);
5073 }
5074 
5075 // ClassLoaderMetaspace
5076 
5077 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
5078   : _lock(lock)
5079   , _space_type(type)
5080   , _vsm(NULL)
5081   , _class_vsm(NULL)
5082 {
5083   initialize(lock, type);
5084 }
5085 
5086 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
5087   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
5088   delete _vsm;
5089   if (Metaspace::using_class_space()) {
5090     delete _class_vsm;
5091   }
5092 }
5093 
5094 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
5095   Metachunk* chunk = get_initialization_chunk(type, mdtype);
5096   if (chunk != NULL) {
5097     // Add to this manager's list of chunks in use and current_chunk().
5098     get_space_manager(mdtype)->add_chunk(chunk, true);
5099   }
5100 }
5101 
5102 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
5103   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
5104 
5105   // Get a chunk from the chunk freelist
5106   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
5107 
5108   if (chunk == NULL) {
5109     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
5110                                                   get_space_manager(mdtype)->medium_chunk_bunch());
5111   }
5112 
5113   return chunk;
5114 }
5115 
5116 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
5117   Metaspace::verify_global_initialization();
5118 
5119   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
5120 
5121   // Allocate SpaceManager for metadata objects.
5122   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
5123 
5124   if (Metaspace::using_class_space()) {
5125     // Allocate SpaceManager for classes.
5126     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
5127   }
5128 
5129   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5130 
5131   // Allocate chunk for metadata objects
5132   initialize_first_chunk(type, Metaspace::NonClassType);
5133 
5134   // Allocate chunk for class metadata objects
5135   if (Metaspace::using_class_space()) {
5136     initialize_first_chunk(type, Metaspace::ClassType);
5137   }
5138 }
5139 
5140 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
5141   Metaspace::assert_not_frozen();
5142 
5143   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
5144 
5145   // Don't use class_vsm() unless UseCompressedClassPointers is true.
5146   if (Metaspace::is_class_space_allocation(mdtype)) {
5147     return  class_vsm()->allocate(word_size);
5148   } else {
5149     return  vsm()->allocate(word_size);
5150   }
5151 }
5152 
5153 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
5154   Metaspace::assert_not_frozen();
5155   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
5156   assert(delta_bytes > 0, "Must be");
5157 
5158   size_t before = 0;
5159   size_t after = 0;
5160   MetaWord* res;
5161   bool incremented;
5162 
5163   // Each thread increments the HWM at most once. Even if the thread fails to increment
5164   // the HWM, an allocation is still attempted. This is because another thread must then
5165   // have incremented the HWM and therefore the allocation might still succeed.
5166   do {
5167     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
5168     res = allocate(word_size, mdtype);
5169   } while (!incremented && res == NULL);
5170 
5171   if (incremented) {
5172     Metaspace::tracer()->report_gc_threshold(before, after,
5173                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
5174     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
5175   }
5176 
5177   return res;
5178 }
5179 
5180 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
5181   return vsm()->allocated_blocks_bytes() +
5182       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
5183 }
5184 
5185 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
5186   return vsm()->allocated_chunks_bytes() +
5187       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
5188 }
5189 
5190 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
5191   Metaspace::assert_not_frozen();
5192   assert(!SafepointSynchronize::is_at_safepoint()
5193          || Thread::current()->is_VM_thread(), "should be the VM thread");
5194 
5195   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
5196 
5197   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
5198 
5199   if (is_class && Metaspace::using_class_space()) {
5200     class_vsm()->deallocate(ptr, word_size);
5201   } else {
5202     vsm()->deallocate(ptr, word_size);
5203   }
5204 }
5205 
5206 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
5207   assert(Metaspace::using_class_space(), "Has to use class space");
5208   return class_vsm()->calc_chunk_size(word_size);
5209 }
5210 
5211 void ClassLoaderMetaspace::print_on(outputStream* out) const {
5212   // Print both class virtual space counts and metaspace.
5213   if (Verbose) {
5214     vsm()->print_on(out);
5215     if (Metaspace::using_class_space()) {
5216       class_vsm()->print_on(out);
5217     }
5218   }
5219 }
5220 
5221 void ClassLoaderMetaspace::verify() {
5222   vsm()->verify();
5223   if (Metaspace::using_class_space()) {
5224     class_vsm()->verify();
5225   }
5226 }
5227 
5228 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
5229   assert_lock_strong(lock());
5230   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
5231   if (Metaspace::using_class_space()) {
5232     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
5233   }
5234 }
5235 
5236 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
5237   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
5238   add_to_statistics_locked(out);
5239 }
5240 
5241 #ifdef ASSERT
5242 static void do_verify_chunk(Metachunk* chunk) {
5243   guarantee(chunk != NULL, "Sanity");
5244   // Verify chunk itself; then verify that it is consistent with the
5245   // occupany map of its containing node.
5246   chunk->verify();
5247   VirtualSpaceNode* const vsn = chunk->container();
5248   OccupancyMap* const ocmap = vsn->occupancy_map();
5249   ocmap->verify_for_chunk(chunk);
5250 }
5251 #endif
5252 
5253 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
5254   chunk->set_is_tagged_free(!inuse);
5255   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
5256   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
5257 }
5258 
5259 /////////////// Unit tests ///////////////
5260 
5261 #ifndef PRODUCT
5262 
5263 class TestMetaspaceUtilsTest : AllStatic {
5264  public:
5265   static void test_reserved() {
5266     size_t reserved = MetaspaceUtils::reserved_bytes();
5267 
5268     assert(reserved > 0, "assert");
5269 
5270     size_t committed  = MetaspaceUtils::committed_bytes();
5271     assert(committed <= reserved, "assert");
5272 
5273     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5274     assert(reserved_metadata > 0, "assert");
5275     assert(reserved_metadata <= reserved, "assert");
5276 
5277     if (UseCompressedClassPointers) {
5278       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5279       assert(reserved_class > 0, "assert");
5280       assert(reserved_class < reserved, "assert");
5281     }
5282   }
5283 
5284   static void test_committed() {
5285     size_t committed = MetaspaceUtils::committed_bytes();
5286 
5287     assert(committed > 0, "assert");
5288 
5289     size_t reserved  = MetaspaceUtils::reserved_bytes();
5290     assert(committed <= reserved, "assert");
5291 
5292     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5293     assert(committed_metadata > 0, "assert");
5294     assert(committed_metadata <= committed, "assert");
5295 
5296     if (UseCompressedClassPointers) {
5297       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5298       assert(committed_class > 0, "assert");
5299       assert(committed_class < committed, "assert");
5300     }
5301   }
5302 
5303   static void test_virtual_space_list_large_chunk() {
5304     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5305     MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5306     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
5307     // vm_allocation_granularity aligned on Windows.
5308     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5309     large_size += (os::vm_page_size()/BytesPerWord);
5310     vs_list->get_new_chunk(large_size, 0);
5311   }
5312 
5313   static void test() {
5314     test_reserved();
5315     test_committed();
5316     test_virtual_space_list_large_chunk();
5317   }
5318 };
5319 
5320 void TestMetaspaceUtils_test() {
5321   TestMetaspaceUtilsTest::test();
5322 }
5323 
5324 class TestVirtualSpaceNodeTest {
5325   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5326                                           size_t& num_small_chunks,
5327                                           size_t& num_specialized_chunks) {
5328     num_medium_chunks = words_left / MediumChunk;
5329     words_left = words_left % MediumChunk;
5330 
5331     num_small_chunks = words_left / SmallChunk;
5332     words_left = words_left % SmallChunk;
5333     // how many specialized chunks can we get?
5334     num_specialized_chunks = words_left / SpecializedChunk;
5335     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5336   }
5337 
5338  public:
5339   static void test() {
5340     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5341     const size_t vsn_test_size_words = MediumChunk  * 4;
5342     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5343 
5344     // The chunk sizes must be multiples of eachother, or this will fail
5345     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5346     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5347 
5348     { // No committed memory in VSN
5349       ChunkManager cm(false);
5350       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5351       vsn.initialize();
5352       vsn.retire(&cm);
5353       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5354     }
5355 
5356     { // All of VSN is committed, half is used by chunks
5357       ChunkManager cm(false);
5358       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5359       vsn.initialize();
5360       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5361       vsn.get_chunk_vs(MediumChunk);
5362       vsn.get_chunk_vs(MediumChunk);
5363       vsn.retire(&cm);
5364       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5365       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5366     }
5367 
5368     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5369     // This doesn't work for systems with vm_page_size >= 16K.
5370     if (page_chunks < MediumChunk) {
5371       // 4 pages of VSN is committed, some is used by chunks
5372       ChunkManager cm(false);
5373       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5374 
5375       vsn.initialize();
5376       vsn.expand_by(page_chunks, page_chunks);
5377       vsn.get_chunk_vs(SmallChunk);
5378       vsn.get_chunk_vs(SpecializedChunk);
5379       vsn.retire(&cm);
5380 
5381       // committed - used = words left to retire
5382       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5383 
5384       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5385       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5386 
5387       assert(num_medium_chunks == 0, "should not get any medium chunks");
5388       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5389       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5390     }
5391 
5392     { // Half of VSN is committed, a humongous chunk is used
5393       ChunkManager cm(false);
5394       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5395       vsn.initialize();
5396       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5397       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5398       vsn.retire(&cm);
5399 
5400       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5401       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5402       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5403 
5404       assert(num_medium_chunks == 0, "should not get any medium chunks");
5405       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5406       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5407     }
5408 
5409   }
5410 
5411 #define assert_is_available_positive(word_size) \
5412   assert(vsn.is_available(word_size), \
5413          #word_size ": " PTR_FORMAT " bytes were not available in " \
5414          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5415          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5416 
5417 #define assert_is_available_negative(word_size) \
5418   assert(!vsn.is_available(word_size), \
5419          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5420          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5421          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5422 
5423   static void test_is_available_positive() {
5424     // Reserve some memory.
5425     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5426     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5427 
5428     // Commit some memory.
5429     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5430     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5431     assert(expanded, "Failed to commit");
5432 
5433     // Check that is_available accepts the committed size.
5434     assert_is_available_positive(commit_word_size);
5435 
5436     // Check that is_available accepts half the committed size.
5437     size_t expand_word_size = commit_word_size / 2;
5438     assert_is_available_positive(expand_word_size);
5439   }
5440 
5441   static void test_is_available_negative() {
5442     // Reserve some memory.
5443     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5444     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5445 
5446     // Commit some memory.
5447     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5448     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5449     assert(expanded, "Failed to commit");
5450 
5451     // Check that is_available doesn't accept a too large size.
5452     size_t two_times_commit_word_size = commit_word_size * 2;
5453     assert_is_available_negative(two_times_commit_word_size);
5454   }
5455 
5456   static void test_is_available_overflow() {
5457     // Reserve some memory.
5458     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5459     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5460 
5461     // Commit some memory.
5462     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5463     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5464     assert(expanded, "Failed to commit");
5465 
5466     // Calculate a size that will overflow the virtual space size.
5467     void* virtual_space_max = (void*)(uintptr_t)-1;
5468     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5469     size_t overflow_size = bottom_to_max + BytesPerWord;
5470     size_t overflow_word_size = overflow_size / BytesPerWord;
5471 
5472     // Check that is_available can handle the overflow.
5473     assert_is_available_negative(overflow_word_size);
5474   }
5475 
5476   static void test_is_available() {
5477     TestVirtualSpaceNodeTest::test_is_available_positive();
5478     TestVirtualSpaceNodeTest::test_is_available_negative();
5479     TestVirtualSpaceNodeTest::test_is_available_overflow();
5480   }
5481 };
5482 
5483 // The following test is placed here instead of a gtest / unittest file
5484 // because the ChunkManager class is only available in this file.
5485 void ChunkManager_test_list_index() {
5486   {
5487     // Test previous bug where a query for a humongous class metachunk,
5488     // incorrectly matched the non-class medium metachunk size.
5489     {
5490       ChunkManager manager(true);
5491 
5492       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5493 
5494       ChunkIndex index = manager.list_index(MediumChunk);
5495 
5496       assert(index == HumongousIndex,
5497           "Requested size is larger than ClassMediumChunk,"
5498           " so should return HumongousIndex. Got index: %d", (int)index);
5499     }
5500 
5501     // Check the specified sizes as well.
5502     {
5503       ChunkManager manager(true);
5504       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5505       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5506       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5507       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5508     }
5509     {
5510       ChunkManager manager(false);
5511       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5512       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5513       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5514       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5515     }
5516 
5517   }
5518 
5519 }
5520 
5521 #endif // !PRODUCT
5522 
5523 #ifdef ASSERT
5524 
5525 // The following test is placed here instead of a gtest / unittest file
5526 // because the ChunkManager class is only available in this file.
5527 class SpaceManagerTest : AllStatic {
5528   friend void SpaceManager_test_adjust_initial_chunk_size();
5529 
5530   static void test_adjust_initial_chunk_size(bool is_class) {
5531     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5532     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5533     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5534 
5535 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5536     do {                                                                         \
5537       size_t v = value;                                                          \
5538       size_t e = expected;                                                       \
5539       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5540              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5541     } while (0)
5542 
5543     // Smallest (specialized)
5544     test_adjust_initial_chunk_size(1,            smallest, is_class);
5545     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5546     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5547 
5548     // Small
5549     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5550     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5551     test_adjust_initial_chunk_size(normal,       normal, is_class);
5552 
5553     // Medium
5554     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5555     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5556     test_adjust_initial_chunk_size(medium,     medium, is_class);
5557 
5558     // Humongous
5559     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5560 
5561 #undef test_adjust_initial_chunk_size
5562   }
5563 
5564   static void test_adjust_initial_chunk_size() {
5565     test_adjust_initial_chunk_size(false);
5566     test_adjust_initial_chunk_size(true);
5567   }
5568 };
5569 
5570 void SpaceManager_test_adjust_initial_chunk_size() {
5571   SpaceManagerTest::test_adjust_initial_chunk_size();
5572 }
5573 
5574 #endif // ASSERT
5575 
5576 struct chunkmanager_statistics_t {
5577   int num_specialized_chunks;
5578   int num_small_chunks;
5579   int num_medium_chunks;
5580   int num_humongous_chunks;
5581 };
5582 
5583 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5584   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5585   ChunkManagerStatistics stat;
5586   chunk_manager->get_statistics(&stat);
5587   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
5588   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
5589   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
5590   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
5591 }
5592 
5593 struct chunk_geometry_t {
5594   size_t specialized_chunk_word_size;
5595   size_t small_chunk_word_size;
5596   size_t medium_chunk_word_size;
5597 };
5598 
5599 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5600   if (mdType == Metaspace::NonClassType) {
5601     out->specialized_chunk_word_size = SpecializedChunk;
5602     out->small_chunk_word_size = SmallChunk;
5603     out->medium_chunk_word_size = MediumChunk;
5604   } else {
5605     out->specialized_chunk_word_size = ClassSpecializedChunk;
5606     out->small_chunk_word_size = ClassSmallChunk;
5607     out->medium_chunk_word_size = ClassMediumChunk;
5608   }
5609 }