1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspace/metaspaceCommon.hpp"
  37 #include "memory/metaspace/metaspaceStatistics.hpp"
  38 #include "memory/metaspaceGCThresholdUpdater.hpp"
  39 #include "memory/metaspaceShared.hpp"
  40 #include "memory/metaspaceTracer.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "memory/universe.hpp"
  43 #include "runtime/atomic.hpp"
  44 #include "runtime/globals.hpp"
  45 #include "runtime/init.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/mutex.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "runtime/orderAccess.inline.hpp"
  50 #include "services/memTracker.hpp"
  51 #include "services/memoryService.hpp"
  52 #include "utilities/align.hpp"
  53 #include "utilities/copy.hpp"
  54 #include "utilities/debug.hpp"
  55 #include "utilities/macros.hpp"
  56 
  57 using namespace metaspace::internals;
  58 
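     // Free-block dictionary (over Metablock), used by BlockFreelist below, and the
     // humongous-chunk dictionary (over Metachunk), used by ChunkManager.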
  59 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  60 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  61 
  62 // Helper function that does a bunch of checks for a chunk.
  63 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  64 
  65 // Given a Metachunk, update its in-use information (both in the
  66 // chunk and the occupancy map).
  67 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  68 
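     // Threshold, in words: a SpaceManager only starts satisfying allocations from its
     // deallocated-block freelist once that freelist has grown beyond this limit.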
  69 size_t const allocation_from_dictionary_limit = 4 * K;
  70 
  71 MetaWord* last_allocated = 0;
  72 
  73 size_t Metaspace::_compressed_class_space_size;
  74 const MetaspaceTracer* Metaspace::_tracer = NULL;
  75 
  76 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  77 
  78 // Internal statistics.
  79 #ifdef ASSERT
  80 static struct {
  81   // Number of allocations.
  82   uintx num_allocs;
  83   // Number of times a ClassLoaderMetaspace was born...
  84   uintx num_metaspace_births;
  85   // ... and died.
  86   uintx num_metaspace_deaths;
  87   // Number of times VirtualSpaceListNodes were created...
  88   uintx num_vsnodes_created;
  89   // ... and purged.
  90   uintx num_vsnodes_purged;
  91   // Number of times we expanded the committed section of the space.
  92   uintx num_committed_space_expanded;
  93   // Number of deallocations
  94   uintx num_deallocs;
  95   // Number of deallocations triggered from outside ("real" deallocations).
  96   uintx num_external_deallocs;
  97   // Number of times an allocation was satisfied from deallocated blocks.
  98   uintx num_allocs_from_deallocated_blocks;
  99 } g_internal_statistics;
 100 #endif
 101 
 102 enum ChunkSizes {    // in words.
 103   ClassSpecializedChunk = 128,
 104   SpecializedChunk = 128,
 105   ClassSmallChunk = 256,
 106   SmallChunk = 512,
 107   ClassMediumChunk = 4 * K,
 108   MediumChunk = 8 * K
 109 };
 110 
 111 // Returns size of this chunk type.
 112 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
 113   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
 114   size_t size = 0;
 115   if (is_class) {
 116     switch(chunktype) {
 117       case SpecializedIndex: size = ClassSpecializedChunk; break;
 118       case SmallIndex: size = ClassSmallChunk; break;
 119       case MediumIndex: size = ClassMediumChunk; break;
 120       default:
 121         ShouldNotReachHere();
 122     }
 123   } else {
 124     switch(chunktype) {
 125       case SpecializedIndex: size = SpecializedChunk; break;
 126       case SmallIndex: size = SmallChunk; break;
 127       case MediumIndex: size = MediumChunk; break;
 128       default:
 129         ShouldNotReachHere();
 130     }
 131   }
 132   return size;
 133 }
 134 
 135 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 136   if (is_class) {
 137     if (size == ClassSpecializedChunk) {
 138       return SpecializedIndex;
 139     } else if (size == ClassSmallChunk) {
 140       return SmallIndex;
 141     } else if (size == ClassMediumChunk) {
 142       return MediumIndex;
 143     } else if (size > ClassMediumChunk) {
 144       // A valid humongous chunk size is a multiple of the smallest chunk size.
 145       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 146       return HumongousIndex;
 147     }
 148   } else {
 149     if (size == SpecializedChunk) {
 150       return SpecializedIndex;
 151     } else if (size == SmallChunk) {
 152       return SmallIndex;
 153     } else if (size == MediumChunk) {
 154       return MediumIndex;
 155     } else if (size > MediumChunk) {
 156       // A valid humongous chunk size is a multiple of the smallest chunk size.
 157       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 158       return HumongousIndex;
 159     }
 160   }
 161   ShouldNotReachHere();
 162   return (ChunkIndex)-1;
 163 }
 164 
 165 ChunkIndex next_chunk_index(ChunkIndex i) {
 166   assert(i < NumberOfInUseLists, "Out of bound");
 167   return (ChunkIndex) (i+1);
 168 }
 169 
 170 ChunkIndex prev_chunk_index(ChunkIndex i) {
 171   assert(i > ZeroIndex, "Out of bound");
 172   return (ChunkIndex) (i-1);
 173 }
 174 
 175 static const char* space_type_name(Metaspace::MetaspaceType t) {
 176   const char* s = NULL;
 177   switch (t) {
 178   case Metaspace::StandardMetaspaceType: s = "Standard"; break;
 179   case Metaspace::BootMetaspaceType: s = "Boot"; break;
 180   case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
 181   case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
 182   default: ShouldNotReachHere();
 183   }
 184   assert(s != NULL, "Invalid space type");
 185   return s;
 186 }
 187 
 188 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 189 uint MetaspaceGC::_shrink_factor = 0;
 190 bool MetaspaceGC::_should_concurrent_collect = false;
 191 
 192 
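     // A ChunkList is a free list of Metachunks of a single fixed size; the ChunkManager
     // below keeps one such list per non-humongous chunk type.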
 193 typedef class FreeList<Metachunk> ChunkList;
 194 
 195 // Manages the global free lists of chunks.
 196 class ChunkManager : public CHeapObj<mtInternal> {
 197   friend class TestVirtualSpaceNodeTest;
 198 
 199   // Free list of chunks of different sizes.
 200   //   SpecializedChunk
 201   //   SmallChunk
 202   //   MediumChunk
 203   ChunkList _free_chunks[NumberOfFreeLists];
 204 
 205   // Whether or not this is the class chunkmanager.
 206   const bool _is_class;
 207 
 208   // Return non-humongous chunk list by its index.
 209   ChunkList* free_chunks(ChunkIndex index);
 210 
 211   // Returns non-humongous chunk list for the given chunk word size.
 212   ChunkList* find_free_chunks_list(size_t word_size);
 213 
 214   //   HumongousChunk
 215   ChunkTreeDictionary _humongous_dictionary;
 216 
 217   // Returns the humongous chunk dictionary.
 218   ChunkTreeDictionary* humongous_dictionary() {
 219     return &_humongous_dictionary;
 220   }
 221 
 222   // Size, in metaspace words, of all chunks managed by this ChunkManager
 223   size_t _free_chunks_total;
 224   // Number of chunks in this ChunkManager
 225   size_t _free_chunks_count;
 226 
 227   // Update counters after a chunk was added or removed.
 228   void account_for_added_chunk(const Metachunk* c);
 229   void account_for_removed_chunk(const Metachunk* c);
 230 
 231   // Debug support
 232 
 233   size_t sum_free_chunks();
 234   size_t sum_free_chunks_count();
 235 
 236   void locked_verify_free_chunks_total();
 237   void slow_locked_verify_free_chunks_total() {
 238     if (VerifyMetaspace) {
 239       locked_verify_free_chunks_total();
 240     }
 241   }
 242   void locked_verify_free_chunks_count();
 243   void slow_locked_verify_free_chunks_count() {
 244     if (VerifyMetaspace) {
 245       locked_verify_free_chunks_count();
 246     }
 247   }
 248   void verify_free_chunks_count();
 249 
 250   // Given a pointer to a chunk, attempts to merge it with neighboring
 251   // free chunks to form a bigger chunk. Returns true if successful.
 252   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 253 
 254   // Helper for chunk merging:
 255   //  Given an address range with 1-n chunks which are all supposed to be
 256   //  free and hence currently managed by this ChunkManager, remove them
 257   //  from this ChunkManager and mark them as invalid.
 258   // - This does not correct the occupancy map.
 259   // - This does not adjust the counters in ChunkManager.
 260   // - This does not adjust the container count counter in the containing VirtualSpaceNode.
 261   // Returns number of chunks removed.
 262   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 263 
 264   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 265   // split up the larger chunk into n smaller chunks, at least one of which should be
 266   // the target chunk of target chunk size. The smaller chunks, including the target
 267   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 268   // Note that this chunk is supposed to be removed from the freelist right away.
 269   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 270 
 271  public:
 272 
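       // Creates a ChunkManager for either the class space (is_class == true) or the
       // non-class metaspace and sizes the three non-humongous free lists accordingly.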
 273   ChunkManager(bool is_class)
 274       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 275     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 276     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 277     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 278   }
 279 
 280   // Allocate (remove) a chunk of the given word size from the global freelist.
 281   Metachunk* chunk_freelist_allocate(size_t word_size);
 282 
 283   // Map a size to a list index assuming that there are lists
 284   // for specialized, small, medium, and humongous chunks.
 285   ChunkIndex list_index(size_t size);
 286 
 287   // Map a given index to the chunk size.
 288   size_t size_by_index(ChunkIndex index) const;
 289 
 290   bool is_class() const { return _is_class; }
 291 
 292   // Convenience accessors.
 293   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 294   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 295   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 296 
 297   // Take a chunk from the ChunkManager. The chunk is expected to be in
 298   // the chunk manager (the freelist if non-humongous, the dictionary if
 299   // humongous).
 300   void remove_chunk(Metachunk* chunk);
 301 
 302   // Return a single chunk of type index to the ChunkManager.
 303   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 304 
 305   // Add the simple linked list of chunks to the freelist of chunks
 306   // of type index.
 307   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 308 
 309   // Total of the space in the free chunks list
 310   size_t free_chunks_total_words();
 311   size_t free_chunks_total_bytes();
 312 
 313   // Number of chunks in the free chunks list
 314   size_t free_chunks_count();
 315 
 316   // Remove from a list by size.  Selects list based on size of chunk.
 317   Metachunk* free_chunks_get(size_t chunk_word_size);
 318 
 319 #define index_bounds_check(index)                                         \
 320   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 321 
 322   size_t num_free_chunks(ChunkIndex index) const {
 323     index_bounds_check(index);
 324 
 325     if (index == HumongousIndex) {
 326       return _humongous_dictionary.total_free_blocks();
 327     }
 328 
 329     ssize_t count = _free_chunks[index].count();
 330     return count == -1 ? 0 : (size_t) count;
 331   }
 332 
 333   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 334     index_bounds_check(index);
 335 
 336     size_t word_size = 0;
 337     if (index == HumongousIndex) {
 338       word_size = _humongous_dictionary.total_size();
 339     } else {
 340       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 341       word_size = size_per_chunk_in_words * num_free_chunks(index);
 342     }
 343 
 344     return word_size * BytesPerWord;
 345   }
 346 
 347   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 348     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 349                                          num_free_chunks(SmallIndex),
 350                                          num_free_chunks(MediumIndex),
 351                                          num_free_chunks(HumongousIndex),
 352                                          size_free_chunks_in_bytes(SpecializedIndex),
 353                                          size_free_chunks_in_bytes(SmallIndex),
 354                                          size_free_chunks_in_bytes(MediumIndex),
 355                                          size_free_chunks_in_bytes(HumongousIndex));
 356   }
 357 
 358   // Debug support
 359   void verify();
 360   void slow_verify() {
 361     if (VerifyMetaspace) {
 362       verify();
 363     }
 364   }
 365   void locked_verify();
 366   void slow_locked_verify() {
 367     if (VerifyMetaspace) {
 368       locked_verify();
 369     }
 370   }
 371   void verify_free_chunks_total();
 372 
 373   void locked_print_free_chunks(outputStream* st);
 374   void locked_print_sum_free_chunks(outputStream* st);
 375 
 376   void print_on(outputStream* st) const;
 377 
 378   // Fill in current statistic values to the given statistics object.
 379   void collect_statistics(ChunkManagerStatistics* out) const;
 380 
 381 };
 382 
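     // SmallBlocks keeps one free list per block size for small deallocated metaspace
     // blocks, covering sizes from _small_block_min_size up to (excluding)
     // _small_block_max_size words.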
 383 class SmallBlocks : public CHeapObj<mtClass> {
 384   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 385   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 386 
 387  private:
 388   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 389 
 390   FreeList<Metablock>& list_at(size_t word_size) {
 391     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 392     return _small_lists[word_size - _small_block_min_size];
 393   }
 394 
 395  public:
 396   SmallBlocks() {
 397     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 398       uint k = i - _small_block_min_size;
 399       _small_lists[k].set_size(i);
 400     }
 401   }
 402 
 403   // Returns the total size, in words, of all blocks, across all block sizes.
 404   size_t total_size() const {
 405     size_t result = 0;
 406     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 407       uint k = i - _small_block_min_size;
 408       result = result + _small_lists[k].count() * _small_lists[k].size();
 409     }
 410     return result;
 411   }
 412 
 413   // Returns the total number of all blocks across all block sizes.
 414   uintx total_num_blocks() const {
 415     uintx result = 0;
 416     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 417       uint k = i - _small_block_min_size;
 418       result = result + _small_lists[k].count();
 419     }
 420     return result;
 421   }
 422 
 423   static uint small_block_max_size() { return _small_block_max_size; }
 424   static uint small_block_min_size() { return _small_block_min_size; }
 425 
 426   MetaWord* get_block(size_t word_size) {
 427     if (list_at(word_size).count() > 0) {
 428       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 429       return new_block;
 430     } else {
 431       return NULL;
 432     }
 433   }
 434   void return_block(Metablock* free_chunk, size_t word_size) {
 435     list_at(word_size).return_chunk_at_head(free_chunk, false);
 436     assert(list_at(word_size).count() > 0, "Should have a chunk");
 437   }
 438 
 439   void print_on(outputStream* st) const {
 440     st->print_cr("SmallBlocks:");
 441     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 442       uint k = i - _small_block_min_size;
 443       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 444     }
 445   }
 446 };
 447 
 448 // Used to manage the free list of Metablocks (a block corresponds
 449 // to the allocation of a quantum of metadata).
 450 class BlockFreelist : public CHeapObj<mtClass> {
 451   BlockTreeDictionary* const _dictionary;
 452   SmallBlocks* _small_blocks;
 453 
 454   // Only allocate and split from freelist if the size of the allocation
 455   // is at least 1/4th the size of the available block.
 456   const static int WasteMultiplier = 4;
 457 
 458   // Accessors
 459   BlockTreeDictionary* dictionary() const { return _dictionary; }
 460   SmallBlocks* small_blocks() {
 461     if (_small_blocks == NULL) {
 462       _small_blocks = new SmallBlocks();
 463     }
 464     return _small_blocks;
 465   }
 466 
 467  public:
 468   BlockFreelist();
 469   ~BlockFreelist();
 470 
 471   // Get and return a block to the free list
 472   MetaWord* get_block(size_t word_size);
 473   void return_block(MetaWord* p, size_t word_size);
 474 
 475   // Returns the total size, in words, of all blocks kept in this structure.
 476   size_t total_size() const  {
 477     size_t result = dictionary()->total_size();
 478     if (_small_blocks != NULL) {
 479       result = result + _small_blocks->total_size();
 480     }
 481     return result;
 482   }
 483 
 484   // Returns the number of all blocks kept in this structure.
 485   uintx num_blocks() const {
 486     uintx result = dictionary()->total_free_blocks();
 487     if (_small_blocks != NULL) {
 488       result = result + _small_blocks->total_num_blocks();
 489     }
 490     return result;
 491   }
 492 
 493   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 494   void print_on(outputStream* st) const;
 495 };
 496 
 497 // Helper for the occupancy bitmap. A type trait yielding an all-bits-are-one constant of unsigned type.
 498 template <typename T> struct all_ones  { static const T value; };
 499 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 500 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 501 
 502 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 503 // keeps information about
 504 // - where a chunk starts
 505 // - whether a chunk is in-use or free
 506 // A bit in this bitmap represents one range of memory in the smallest
 507 // chunk size (SpecializedChunk or ClassSpecializedChunk).
 508 class OccupancyMap : public CHeapObj<mtInternal> {
 509 
 510   // The address range this map covers.
 511   const MetaWord* const _reference_address;
 512   const size_t _word_size;
 513 
 514   // The word size of a specialized chunk, aka the number of words one
 515   // bit in this map represents.
 516   const size_t _smallest_chunk_word_size;
 517 
 518   // map data
 519   // Data are organized in two bit layers:
 520   // The first layer is the chunk-start-map. Here, a bit is set to mark
 521   // the corresponding region as the head of a chunk.
 522   // The second layer is the in-use-map. Here, a set bit indicates that
 523   // the corresponding region belongs to a chunk which is in use.
 524   uint8_t* _map[2];
 525 
 526   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 527 
 528   // length, in bytes, of bitmap data
 529   size_t _map_size;
 530 
 531   // Returns true if bit at position pos at bit-layer layer is set.
 532   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 533     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 534     const unsigned byteoffset = pos / 8;
 535     assert(byteoffset < _map_size,
 536            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 537     const unsigned mask = 1 << (pos % 8);
 538     return (_map[layer][byteoffset] & mask) > 0;
 539   }
 540 
 541   // Changes bit at position pos at bit-layer layer to value v.
 542   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 543     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 544     const unsigned byteoffset = pos / 8;
 545     assert(byteoffset < _map_size,
 546            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 547     const unsigned mask = 1 << (pos % 8);
 548     if (v) {
 549       _map[layer][byteoffset] |= mask;
 550     } else {
 551       _map[layer][byteoffset] &= ~mask;
 552     }
 553   }
 554 
 555   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 556   // pos is 32/64 aligned and num_bits is 32/64.
 557   // This is the typical case when coalescing to medium chunks, whose size is
 558   // 32 or 64 times the specialized chunk size (depending on class or non-class
 559   // case), so they occupy 32 or 64 bits which should be 32/64-bit aligned,
 560   // because chunks are chunk-size aligned.
 561   template <typename T>
 562   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 563     assert(_map_size > 0, "not initialized");
 564     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 565     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 566     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 567     const size_t byteoffset = pos / 8;
 568     assert(byteoffset <= (_map_size - sizeof(T)),
 569            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 570     const T w = *(T*)(_map[layer] + byteoffset);
 571     return w > 0 ? true : false;
 572   }
 573 
 574   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 575   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 576     if (pos % 32 == 0 && num_bits == 32) {
 577       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 578     } else if (pos % 64 == 0 && num_bits == 64) {
 579       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 580     } else {
 581       for (unsigned n = 0; n < num_bits; n ++) {
 582         if (get_bit_at_position(pos + n, layer)) {
 583           return true;
 584         }
 585       }
 586     }
 587     return false;
 588   }
 589 
 590   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 591   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 592     assert(word_size % _smallest_chunk_word_size == 0,
 593         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 594     const unsigned pos = get_bitpos_for_address(p);
 595     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 596     return is_any_bit_set_in_region(pos, num_bits, layer);
 597   }
 598 
 599   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 600   // pos is 32/64 aligned and num_bits is 32/64.
 601   // This is the typical case when coalescing to medium chunks, whose size
 602   // is 32 or 64 times the specialized chunk size (depending on class or
 603   // non-class case), so they occupy 32 or 64 bits which should be
 604   // 32/64-bit aligned, because chunks are chunk-size aligned.
 605   template <typename T>
 606   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 607     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 608            (unsigned)(sizeof(T) * 8), pos);
 609     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 610            num_bits, (unsigned)(sizeof(T) * 8));
 611     const size_t byteoffset = pos / 8;
 612     assert(byteoffset <= (_map_size - sizeof(T)),
 613            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 614     T* const pw = (T*)(_map[layer] + byteoffset);
 615     *pw = v ? all_ones<T>::value : (T) 0;
 616   }
 617 
 618   // Set all bits in a region starting at pos to a value.
 619   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 620     assert(_map_size > 0, "not initialized");
 621     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 622     if (pos % 32 == 0 && num_bits == 32) {
 623       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 624     } else if (pos % 64 == 0 && num_bits == 64) {
 625       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 626     } else {
 627       for (unsigned n = 0; n < num_bits; n ++) {
 628         set_bit_at_position(pos + n, layer, v);
 629       }
 630     }
 631   }
 632 
 633   // Helper: sets all bits in a region [p, p+word_size).
 634   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 635     assert(word_size % _smallest_chunk_word_size == 0,
 636         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 637     const unsigned pos = get_bitpos_for_address(p);
 638     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 639     set_bits_of_region(pos, num_bits, layer, v);
 640   }
 641 
 642   // Helper: given an address, return the bit position representing that address.
 643   unsigned get_bitpos_for_address(const MetaWord* p) const {
 644     assert(_reference_address != NULL, "not initialized");
 645     assert(p >= _reference_address && p < _reference_address + _word_size,
 646            "Address %p out of range for occupancy map [%p..%p).",
 647             p, _reference_address, _reference_address + _word_size);
 648     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 649            "Address not aligned (%p).", p);
 650     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 651     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 652     return (unsigned) d;
 653   }
 654 
 655  public:
 656 
 657   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 658     _reference_address(reference_address), _word_size(word_size),
 659     _smallest_chunk_word_size(smallest_chunk_word_size) {
 660     assert(reference_address != NULL, "invalid reference address");
 661     assert(is_aligned(reference_address, smallest_chunk_word_size),
 662            "Reference address not aligned to smallest chunk size.");
 663     assert(is_aligned(word_size, smallest_chunk_word_size),
 664            "Word_size shall be a multiple of the smallest chunk size.");
 665     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 666     size_t num_bits = word_size / smallest_chunk_word_size;
 667     _map_size = (num_bits + 7) / 8;
 668     assert(_map_size * 8 >= num_bits, "sanity");
 669     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 670     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 671     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 672     memset(_map[1], 0, _map_size);
 673     memset(_map[0], 0, _map_size);
 674     // Sanity test: the first and last possible chunk start addresses in
 675     // the covered range shall map to the first and last bits in the bitmap.
 676     assert(get_bitpos_for_address(reference_address) == 0,
 677       "First chunk address in range must map to first bit in bitmap.");
 678     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 679       "Last chunk address in range must map to last bit in bitmap.");
 680   }
 681 
 682   ~OccupancyMap() {
 683     os::free(_map[0]);
 684     os::free(_map[1]);
 685   }
 686 
 687   // Returns true if a chunk starts at address p.
 688   bool chunk_starts_at_address(MetaWord* p) const {
 689     const unsigned pos = get_bitpos_for_address(p);
 690     return get_bit_at_position(pos, layer_chunk_start_map);
 691   }
 692 
 693   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 694     const unsigned pos = get_bitpos_for_address(p);
 695     set_bit_at_position(pos, layer_chunk_start_map, v);
 696   }
 697 
 698   // Removes all chunk-start-bits inside a region, typically as a
 699   // result of a chunk merge.
 700   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 701     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 702   }
 703 
 704   // Returns true if there are live (in use) chunks in the region limited
 705   // by [p, p+word_size).
 706   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 707     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 708   }
 709 
 710   // Marks the region starting at p with the size word_size as in use
 711   // or free, depending on v.
 712   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 713     set_bits_of_region(p, word_size, layer_in_use_map, v);
 714   }
 715 
 716 #ifdef ASSERT
 717   // Verify occupancy map for the address range [from, to).
 718   // We need to tell it the address range, because the memory the
 719   // occupancy map is covering may not be fully committed yet.
 720   void verify(MetaWord* from, MetaWord* to) {
 721     Metachunk* chunk = NULL;
 722     int nth_bit_for_chunk = 0;
 723     MetaWord* chunk_end = NULL;
 724     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 725       const unsigned pos = get_bitpos_for_address(p);
 726       // Check the chunk-starts-info:
 727       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 728         // Chunk start marked in bitmap.
 729         chunk = (Metachunk*) p;
 730         if (chunk_end != NULL) {
 731           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 732                  "the next chunk to start at %p).", p, chunk_end);
 733         }
 734         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 735         if (chunk->get_chunk_type() != HumongousIndex) {
 736           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 737         }
 738         chunk_end = p + chunk->word_size();
 739         nth_bit_for_chunk = 0;
 740         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 741       } else {
 742         // No chunk start marked in bitmap.
 743         assert(chunk != NULL, "Chunk should start at start of address range.");
 744         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 745         nth_bit_for_chunk ++;
 746       }
 747       // Check the in-use-info:
 748       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 749       if (in_use_bit) {
 750         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 751                chunk, nth_bit_for_chunk);
 752       } else {
 753         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 754                chunk, nth_bit_for_chunk);
 755       }
 756     }
 757   }
 758 
 759   // Verify that a given chunk is correctly accounted for in the bitmap.
 760   void verify_for_chunk(Metachunk* chunk) {
 761     assert(chunk_starts_at_address((MetaWord*) chunk),
 762            "No chunk start marked in map for chunk %p.", chunk);
 763     // For chunks larger than the minimal chunk size, no other chunk
 764     // must start in its area.
 765     if (chunk->word_size() > _smallest_chunk_word_size) {
 766       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 767                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 768              "No chunk must start within another chunk.");
 769     }
 770     if (!chunk->is_tagged_free()) {
 771       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 772              "Chunk %p is in use but marked as free in map (%d %d).",
 773              chunk, chunk->get_chunk_type(), chunk->get_origin());
 774     } else {
 775       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 776              "Chunk %p is free but marked as in-use in map (%d %d).",
 777              chunk, chunk->get_chunk_type(), chunk->get_origin());
 778     }
 779   }
 780 
 781 #endif // ASSERT
 782 
 783 };
 784 
 785 // A VirtualSpaceList node.
 786 class VirtualSpaceNode : public CHeapObj<mtClass> {
 787   friend class VirtualSpaceList;
 788 
 789   // Link to next VirtualSpaceNode
 790   VirtualSpaceNode* _next;
 791 
 792   // Whether this node is contained in class or metaspace.
 793   const bool _is_class;
 794 
 795   // total in the VirtualSpace
 796   MemRegion _reserved;
 797   ReservedSpace _rs;
 798   VirtualSpace _virtual_space;
 799   MetaWord* _top;
 800   // count of chunks contained in this VirtualSpace
 801   uintx _container_count;
 802 
 803   OccupancyMap* _occupancy_map;
 804 
 805   // Convenience functions to access the _virtual_space
 806   char* low()  const { return virtual_space()->low(); }
 807   char* high() const { return virtual_space()->high(); }
 808 
 809   // The first Metachunk will be allocated at the bottom of the
 810   // VirtualSpace
 811   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 812 
 813   // Committed but unused space in the virtual space
 814   size_t free_words_in_vs() const;
 815 
 816   // True if this node belongs to class metaspace.
 817   bool is_class() const { return _is_class; }
 818 
 819   // Helper function for take_from_committed: allocate padding chunks
 820   // until top is at the given address.
 821   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 822 
 823  public:
 824 
 825   VirtualSpaceNode(bool is_class, size_t byte_size);
 826   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 827     _next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {}
 828   ~VirtualSpaceNode();
 829 
 830   // Convenience functions for logical bottom and end
 831   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 832   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 833 
 834   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 835   OccupancyMap* occupancy_map() { return _occupancy_map; }
 836 
 837   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 838 
 839   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 840   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 841 
 842   bool is_pre_committed() const { return _virtual_space.special(); }
 843 
 844   // address of next available space in _virtual_space;
 845   // Accessors
 846   VirtualSpaceNode* next() { return _next; }
 847   void set_next(VirtualSpaceNode* v) { _next = v; }
 848 
 849   void set_reserved(MemRegion const v) { _reserved = v; }
 850   void set_top(MetaWord* v) { _top = v; }
 851 
 852   // Accessors
 853   MemRegion* reserved() { return &_reserved; }
 854   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 855 
 856   // Returns true if "word_size" is available in the VirtualSpace
 857   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 858 
 859   MetaWord* top() const { return _top; }
 860   void inc_top(size_t word_size) { _top += word_size; }
 861 
 862   uintx container_count() { return _container_count; }
 863   void inc_container_count();
 864   void dec_container_count();
 865 #ifdef ASSERT
 866   uintx container_count_slow();
 867   void verify_container_count();
 868 #endif
 869 
 870   // used and capacity in this single entry in the list
 871   size_t used_words_in_vs() const;
 872   size_t capacity_words_in_vs() const;
 873 
 874   bool initialize();
 875 
 876   // get space from the virtual space
 877   Metachunk* take_from_committed(size_t chunk_word_size);
 878 
 879   // Allocate a chunk from the virtual space and return it.
 880   Metachunk* get_chunk_vs(size_t chunk_word_size);
 881 
 882   // Expands or shrinks the committed space in a virtual space. Delegates
 883   // to VirtualSpace.
 884   bool expand_by(size_t min_words, size_t preferred_words);
 885 
 886   // In preparation for deleting this node, remove all the chunks
 887   // in the node from any freelist.
 888   void purge(ChunkManager* chunk_manager);
 889 
 890   // If an allocation doesn't fit in the current node a new node is created.
 891   // Allocate chunks out of the remaining committed space in this node
 892   // to avoid wasting that memory.
 893   // This always adds up because all the chunk sizes are multiples of
 894   // the smallest chunk size.
 895   void retire(ChunkManager* chunk_manager);
 896 
 897 
 898   void print_on(outputStream* st) const                 { print_on(st, K); }
 899   void print_on(outputStream* st, size_t scale) const;
 900   void print_map(outputStream* st, bool is_class) const;
 901 
 902   // Debug support
 903   DEBUG_ONLY(void mangle();)
 904   // Verify counters, all chunks in this list node and the occupancy map.
 905   DEBUG_ONLY(void verify();)
 906   // Verify that all free chunks in this node are ideally merged
 907   // (there should not be multiple small chunks where a large chunk could exist.)
 908   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 909 
 910 };
 911 
 912 #define assert_is_aligned(value, alignment)                  \
 913   assert(is_aligned((value), (alignment)),                   \
 914          SIZE_FORMAT_HEX " is not aligned to "               \
 915          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 916 
 917 #define assert_counter(expected_value, real_value, msg) \
 918   assert( (expected_value) == (real_value),             \
 919          "Counter mismatch (%s): expected " SIZE_FORMAT \
 920          ", but got: " SIZE_FORMAT ".", msg, expected_value, \
 921          real_value);
 922 
 923 // Decide if large pages should be committed when the memory is reserved.
 924 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 925   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 926     size_t words = bytes / BytesPerWord;
 927     bool is_class = false; // We never reserve large pages for the class space.
 928     if (MetaspaceGC::can_expand(words, is_class) &&
 929         MetaspaceGC::allowed_expansion() >= words) {
 930       return true;
 931     }
 932   }
 933 
 934   return false;
 935 }
 936 
 937 // bytes is the size of the associated virtual space.
 938 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 939   _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
 940   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 941   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 942   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 943 
 944   if (_rs.is_reserved()) {
 945     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 946     assert(_rs.size() != 0, "Catch if we get a 0 size");
 947     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 948     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 949 
 950     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 951   }
 952 }
 953 
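     // Removes all chunks in this node from the given ChunkManager's free lists and
     // clears their sentinels. The node is about to be purged, so every chunk in it
     // is expected to be tagged free already.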
 954 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 955   DEBUG_ONLY(this->verify();)
 956   Metachunk* chunk = first_chunk();
 957   Metachunk* invalid_chunk = (Metachunk*) top();
 958   while (chunk < invalid_chunk ) {
 959     assert(chunk->is_tagged_free(), "Should be tagged free");
 960     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 961     chunk_manager->remove_chunk(chunk);
 962     chunk->remove_sentinel();
 963     assert(chunk->next() == NULL &&
 964            chunk->prev() == NULL,
 965            "Was not removed from its list");
 966     chunk = (Metachunk*) next;
 967   }
 968 }
 969 
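     // Prints an ASCII map of this node: one character per smallest-chunk-sized area,
     // showing chunk start points, chunk type and in-use state, chunk origin, and
     // whether a chunk is still virgin (never used).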
 970 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 971 
 972   if (bottom() == top()) {
 973     return;
 974   }
 975 
 976   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 977   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 978   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 979 
 980   int line_len = 100;
 981   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 982   line_len = (int)(section_len / spec_chunk_size);
 983 
 984   static const int NUM_LINES = 4;
 985 
 986   char* lines[NUM_LINES];
 987   for (int i = 0; i < NUM_LINES; i ++) {
 988     lines[i] = (char*)os::malloc(line_len, mtInternal);
 989   }
 990   int pos = 0;
 991   const MetaWord* p = bottom();
 992   const Metachunk* chunk = (const Metachunk*)p;
 993   const MetaWord* chunk_end = p + chunk->word_size();
 994   while (p < top()) {
 995     if (pos == line_len) {
 996       pos = 0;
 997       for (int i = 0; i < NUM_LINES; i ++) {
 998         st->fill_to(22);
 999         st->print_raw(lines[i], line_len);
1000         st->cr();
1001       }
1002     }
1003     if (pos == 0) {
1004       st->print(PTR_FORMAT ":", p2i(p));
1005     }
1006     if (p == chunk_end) {
1007       chunk = (Metachunk*)p;
1008       chunk_end = p + chunk->word_size();
1009     }
1010     // Line 1: chunk starting points (a dot if that area is a chunk start).
1011     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
1012 
1013     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
1014     // chunk is in use.
1015     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
1016     if (chunk->word_size() == spec_chunk_size) {
1017       lines[1][pos] = chunk_is_free ? 'x' : 'X';
1018     } else if (chunk->word_size() == small_chunk_size) {
1019       lines[1][pos] = chunk_is_free ? 's' : 'S';
1020     } else if (chunk->word_size() == med_chunk_size) {
1021       lines[1][pos] = chunk_is_free ? 'm' : 'M';
1022     } else if (chunk->word_size() > med_chunk_size) {
1023       lines[1][pos] = chunk_is_free ? 'h' : 'H';
1024     } else {
1025       ShouldNotReachHere();
1026     }
1027 
1028     // Line 3: chunk origin
1029     const ChunkOrigin origin = chunk->get_origin();
1030     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
1031 
1032     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
1033     //         but were never used.
1034     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
1035 
1036     p += spec_chunk_size;
1037     pos ++;
1038   }
1039   if (pos > 0) {
1040     for (int i = 0; i < NUM_LINES; i ++) {
1041       st->fill_to(22);
1042       st->print_raw(lines[i], line_len);
1043       st->cr();
1044     }
1045   }
1046   for (int i = 0; i < NUM_LINES; i ++) {
1047     os::free(lines[i]);
1048   }
1049 }
1050 
1051 
1052 #ifdef ASSERT
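     // Slow path: counts the in-use (non-free) chunks in this node by walking it;
     // used to cross-check the _container_count counter.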
1053 uintx VirtualSpaceNode::container_count_slow() {
1054   uintx count = 0;
1055   Metachunk* chunk = first_chunk();
1056   Metachunk* invalid_chunk = (Metachunk*) top();
1057   while (chunk < invalid_chunk ) {
1058     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1059     do_verify_chunk(chunk);
1060     // Don't count the chunks on the free lists.  Those are
1061     // still part of the VirtualSpaceNode but not currently
1062     // counted.
1063     if (!chunk->is_tagged_free()) {
1064       count++;
1065     }
1066     chunk = (Metachunk*) next;
1067   }
1068   return count;
1069 }
1070 #endif
1071 
1072 #ifdef ASSERT
1073 // Verify counters, all chunks in this list node and the occupancy map.
1074 void VirtualSpaceNode::verify() {
1075   uintx num_in_use_chunks = 0;
1076   Metachunk* chunk = first_chunk();
1077   Metachunk* invalid_chunk = (Metachunk*) top();
1078 
1079   // Iterate the chunks in this node and verify each chunk.
1080   while (chunk < invalid_chunk ) {
1081     DEBUG_ONLY(do_verify_chunk(chunk);)
1082     if (!chunk->is_tagged_free()) {
1083       num_in_use_chunks ++;
1084     }
1085     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1086     chunk = (Metachunk*) next;
1087   }
1088   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1089          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1090   // Also verify the occupancy map.
1091   occupancy_map()->verify(this->bottom(), this->top());
1092 }
1093 #endif // ASSERT
1094 
1095 #ifdef ASSERT
1096 // Verify that all free chunks in this node are ideally merged
1097 // (there should not be multiple small chunks where a large chunk could exist.)
1098 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1099   Metachunk* chunk = first_chunk();
1100   Metachunk* invalid_chunk = (Metachunk*) top();
1101   // Shorthands.
1102   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1103   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1104   int num_free_chunks_since_last_med_boundary = -1;
1105   int num_free_chunks_since_last_small_boundary = -1;
1106   while (chunk < invalid_chunk ) {
1107     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1108     // Reset the counter when encountering a non-free chunk.
1109     if (chunk->get_chunk_type() != HumongousIndex) {
1110       if (chunk->is_tagged_free()) {
1111         // Count successive free, non-humongous chunks.
1112         if (is_aligned(chunk, size_small)) {
1113           assert(num_free_chunks_since_last_small_boundary <= 1,
1114                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1115           num_free_chunks_since_last_small_boundary = 0;
1116         } else if (num_free_chunks_since_last_small_boundary != -1) {
1117           num_free_chunks_since_last_small_boundary ++;
1118         }
1119         if (is_aligned(chunk, size_med)) {
1120           assert(num_free_chunks_since_last_med_boundary <= 1,
1121                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1122           num_free_chunks_since_last_med_boundary = 0;
1123         } else if (num_free_chunks_since_last_med_boundary != -1) {
1124           num_free_chunks_since_last_med_boundary ++;
1125         }
1126       } else {
1127         // Encountering a non-free chunk, reset counters.
1128         num_free_chunks_since_last_med_boundary = -1;
1129         num_free_chunks_since_last_small_boundary = -1;
1130       }
1131     } else {
1132       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1133       num_free_chunks_since_last_med_boundary = -1;
1134       num_free_chunks_since_last_small_boundary = -1;
1135     }
1136 
1137     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1138     chunk = (Metachunk*) next;
1139   }
1140 }
1141 #endif // ASSERT
1142 
1143 // List of VirtualSpaces for metadata allocation.
1144 class VirtualSpaceList : public CHeapObj<mtClass> {
1145   friend class VirtualSpaceNode;
1146 
1147   enum VirtualSpaceSizes {
1148     VirtualSpaceSize = 256 * K
1149   };
1150 
1151   // Head of the list
1152   VirtualSpaceNode* _virtual_space_list;
1153   // virtual space currently being used for allocations
1154   VirtualSpaceNode* _current_virtual_space;
1155 
1156   // Is this VirtualSpaceList used for the compressed class space
1157   bool _is_class;
1158 
1159   // Sum of reserved and committed memory in the virtual spaces
1160   size_t _reserved_words;
1161   size_t _committed_words;
1162 
1163   // Number of virtual spaces
1164   size_t _virtual_space_count;
1165 
1166   ~VirtualSpaceList();
1167 
1168   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1169 
1170   void set_virtual_space_list(VirtualSpaceNode* v) {
1171     _virtual_space_list = v;
1172   }
1173   void set_current_virtual_space(VirtualSpaceNode* v) {
1174     _current_virtual_space = v;
1175   }
1176 
1177   void link_vs(VirtualSpaceNode* new_entry);
1178 
1179   // Get another virtual space and add it to the list.  This
1180   // is typically prompted by a failed attempt to allocate a chunk
1181   // and is typically followed by the allocation of a chunk.
1182   bool create_new_virtual_space(size_t vs_word_size);
1183 
1184   // Chunk up the unused committed space in the current
1185   // virtual space and add the chunks to the free list.
1186   void retire_current_virtual_space();
1187 
1188  public:
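       // Create either a growable list that reserves new nodes of roughly word_size
       // words as needed, or a list wrapping an already-reserved space (used for the
       // compressed class space).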
1189   VirtualSpaceList(size_t word_size);
1190   VirtualSpaceList(ReservedSpace rs);
1191 
1192   size_t free_bytes();
1193 
1194   Metachunk* get_new_chunk(size_t chunk_word_size,
1195                            size_t suggested_commit_granularity);
1196 
1197   bool expand_node_by(VirtualSpaceNode* node,
1198                       size_t min_words,
1199                       size_t preferred_words);
1200 
1201   bool expand_by(size_t min_words,
1202                  size_t preferred_words);
1203 
1204   VirtualSpaceNode* current_virtual_space() {
1205     return _current_virtual_space;
1206   }
1207 
1208   bool is_class() const { return _is_class; }
1209 
1210   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1211 
1212   size_t reserved_words()  { return _reserved_words; }
1213   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1214   size_t committed_words() { return _committed_words; }
1215   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1216 
1217   void inc_reserved_words(size_t v);
1218   void dec_reserved_words(size_t v);
1219   void inc_committed_words(size_t v);
1220   void dec_committed_words(size_t v);
1221   void inc_virtual_space_count();
1222   void dec_virtual_space_count();
1223 
1224   bool contains(const void* ptr);
1225 
1226   // Unlink empty VirtualSpaceNodes and free them.
1227   void purge(ChunkManager* chunk_manager);
1228 
1229   void print_on(outputStream* st) const                 { print_on(st, K); }
1230   void print_on(outputStream* st, size_t scale) const;
1231   void print_map(outputStream* st) const;
1232 
1233   class VirtualSpaceListIterator : public StackObj {
1234     VirtualSpaceNode* _virtual_spaces;
1235    public:
1236     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1237       _virtual_spaces(virtual_spaces) {}
1238 
1239     bool repeat() {
1240       return _virtual_spaces != NULL;
1241     }
1242 
1243     VirtualSpaceNode* get_next() {
1244       VirtualSpaceNode* result = _virtual_spaces;
1245       if (_virtual_spaces != NULL) {
1246         _virtual_spaces = _virtual_spaces->next();
1247       }
1248       return result;
1249     }
1250   };
1251 };
1252 
1253 class Metadebug : AllStatic {
1254   // Debugging support for Metaspaces
1255   static int _allocation_fail_alot_count;
1256 
1257  public:
1258 
1259   static void init_allocation_fail_alot_count();
1260 #ifdef ASSERT
1261   static bool test_metadata_failure();
1262 #endif
1263 };
1264 
1265 int Metadebug::_allocation_fail_alot_count = 0;
1266 
1267 
1268 //  SpaceManager - used by Metaspace to handle allocations
1269 class SpaceManager : public CHeapObj<mtClass> {
1270   friend class ClassLoaderMetaspace;
1271   friend class Metadebug;
1272 
1273  private:
1274 
1275   // protects allocations
1276   Mutex* const _lock;
1277 
1278   // Type of metadata allocated.
1279   const Metaspace::MetadataType   _mdtype;
1280 
1281   // Type of metaspace
1282   const Metaspace::MetaspaceType  _space_type;
1283 
1284   // List of chunks in use by this SpaceManager.  Allocations
1285   // are done from the current chunk.  The list is used for deallocating
1286   // chunks when the SpaceManager is freed.
1287   Metachunk* _chunks_in_use[NumberOfInUseLists];
1288   Metachunk* _current_chunk;
1289 
1290   // Maximum number of small chunks to allocate to a SpaceManager
1291   static uint const _small_chunk_limit;
1292 
1293   // Maximum number of specialized chunks to allocate for anonymous and delegating
1294   // metadata space to a SpaceManager
1295   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1296 
1297   // Some running counters, but let's keep their number small so as not to add
1298   // too much to the per-classloader footprint.
1299   // Note: capacity = used + free + waste + overhead. We do not keep running counters for
1300   // free and waste. Their sum can be deduced from the three other values.
1301   size_t _overhead_words;
1302   size_t _capacity_words;
1303   size_t _used_words;
1304 
1305   // Free lists of blocks are per SpaceManager since they
1306   // are assumed to be in chunks in use by the SpaceManager
1307   // and all chunks in use by a SpaceManager are freed when
1308   // the class loader using the SpaceManager is collected.
1309   BlockFreelist* _block_freelists;
1310 
1311  private:
1312   // Accessors
1313   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1314   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1315     _chunks_in_use[index] = v;
1316   }
1317 
1318   BlockFreelist* block_freelists() const { return _block_freelists; }
1319 
1320   Metaspace::MetadataType mdtype() { return _mdtype; }
1321 
1322   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1323   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1324 
1325   Metachunk* current_chunk() const { return _current_chunk; }
1326   void set_current_chunk(Metachunk* v) {
1327     _current_chunk = v;
1328   }
1329 
1330   Metachunk* find_current_chunk(size_t word_size);
1331 
1332   // Add chunk to the list of chunks in use
1333   void add_chunk(Metachunk* v, bool make_current);
1334   void retire_current_chunk();
1335 
1336   Mutex* lock() const { return _lock; }
1337 
1338   // Adds to the given statistic object. Expects to be locked with lock().
1339   void add_to_statistics_locked(SpaceManagerStatistics* out) const;
1340 
1341   // Verify internal counters against the current state. Expects to be locked with lock().
1342   DEBUG_ONLY(void verify_metrics_locked() const;)
1343 
1344  protected:
1345   void initialize();
1346 
1347  public:
1348   SpaceManager(Metaspace::MetadataType mdtype,
1349                Metaspace::MetaspaceType space_type,
1350                Mutex* lock);
1351   ~SpaceManager();
1352 
1353   enum ChunkMultiples {
1354     MediumChunkMultiple = 4
1355   };
1356 
1357   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1358   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1359   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1360 
1361   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1362 
1363   // Accessors
1364   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1365 
1366   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1367   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1368   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1369 
1370   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1371 
1372   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1373 
1374   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1375 
1376   size_t capacity_words() const     { return _capacity_words; }
1377   size_t used_words() const         { return _used_words; }
1378   size_t overhead_words() const     { return _overhead_words; }
1379 
1380   // Adjust local, global counters after a new chunk has been added.
1381   void account_for_new_chunk(const Metachunk* new_chunk);
1382 
1383   // Adjust local, global counters after space has been allocated from the current chunk.
1384   void account_for_allocation(size_t words);
1385 
1386   // Adjust global counters just before the SpaceManager dies, after all its chunks
1387   // have been returned to the freelist.
1388   void account_for_spacemanager_death();
1389 
1390   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1391   // or return the unadjusted size if the requested size is humongous.
1392   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1393   size_t adjust_initial_chunk_size(size_t requested) const;
1394 
1395   // Get the initial chunk size for this metaspace type.
1396   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1397 
1398   // Todo: remove this once we have counters by chunk type.
1399   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1400 
1401   Metachunk* get_new_chunk(size_t chunk_word_size);
1402 
1403   // Block allocation and deallocation.
1404   // Allocates a block from the current chunk
1405   MetaWord* allocate(size_t word_size);
1406 
1407   // Helper for allocations
1408   MetaWord* allocate_work(size_t word_size);
1409 
1410   // Returns a block to the per manager freelist
1411   void deallocate(MetaWord* p, size_t word_size);
1412 
1413   // Based on the allocation size and a minimum chunk size,
1414   // calculate the chunk size to return (for expanding space for chunk allocation).
1415   size_t calc_chunk_size(size_t allocation_word_size);
1416 
1417   // Called when an allocation from the current chunk fails.
1418   // Gets a new chunk (may require getting a new virtual space),
1419   // and allocates from that chunk.
1420   MetaWord* grow_and_allocate(size_t word_size);
1421 
1422   // Notify memory usage to MemoryService.
1423   void track_metaspace_memory_usage();
1424 
1425   // debugging support.
1426 
1427   void print_on(outputStream* st) const;
1428   void locked_print_chunks_in_use_on(outputStream* st) const;
1429 
1430   void verify();
1431   void verify_chunk_size(Metachunk* chunk);
1432 
1433   // This adjusts the given size to be at least the minimum allocation size in
1434   // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
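       // For example (a sketch, assuming a 64-bit VM where sizeof(Metablock) is three words):
       // a request for a single word is first raised to sizeof(Metablock) and then aligned to
       // Metachunk::object_alignment(), so even the smallest request is served with a 3-word block.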
1435   size_t get_allocation_word_size(size_t word_size) {
1436     size_t byte_size = word_size * BytesPerWord;
1437 
1438     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1439     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1440 
1441     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1442     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1443 
1444     return raw_word_size;
1445   }
1446 
1447   // Adds to the given statistic object.
1448   void add_to_statistics(SpaceManagerStatistics* out) const;
1449 
1450   // Verify internal counters against the current state.
1451   DEBUG_ONLY(void verify_metrics() const;)
1452 
1453 };
1454 
1455 uint const SpaceManager::_small_chunk_limit = 4;
1456 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1457 
1458 void VirtualSpaceNode::inc_container_count() {
1459   assert_lock_strong(MetaspaceExpand_lock);
1460   _container_count++;
1461 }
1462 
1463 void VirtualSpaceNode::dec_container_count() {
1464   assert_lock_strong(MetaspaceExpand_lock);
1465   _container_count--;
1466 }
1467 
1468 #ifdef ASSERT
1469 void VirtualSpaceNode::verify_container_count() {
1470   assert(_container_count == container_count_slow(),
1471          "Inconsistency in container_count _container_count " UINTX_FORMAT
1472          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1473 }
1474 #endif
1475 
1476 // BlockFreelist methods
1477 
1478 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1479 
1480 BlockFreelist::~BlockFreelist() {
1481   delete _dictionary;
1482   if (_small_blocks != NULL) {
1483     delete _small_blocks;
1484   }
1485 }
1486 
1487 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1488   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1489 
1490   Metablock* free_chunk = ::new (p) Metablock(word_size);
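       // Route the returned block by size: blocks below the small block threshold are kept on
       // the per-size small block lists, larger ones go into the binary tree dictionary.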
1491   if (word_size < SmallBlocks::small_block_max_size()) {
1492     small_blocks()->return_block(free_chunk, word_size);
1493   } else {
1494     dictionary()->return_chunk(free_chunk);
1495   }
1496   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1497             SIZE_FORMAT, p2i(free_chunk), word_size);
1498 }
1499 
1500 MetaWord* BlockFreelist::get_block(size_t word_size) {
1501   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1502 
1503   // Try small_blocks first.
1504   if (word_size < SmallBlocks::small_block_max_size()) {
1505     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1506     // this space manager.
1507     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1508     if (new_block != NULL) {
1509       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1510               p2i(new_block), word_size);
1511       return new_block;
1512     }
1513   }
1514 
1515   if (word_size < BlockFreelist::min_dictionary_size()) {
1516     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1517     return NULL;
1518   }
1519 
1520   Metablock* free_block = dictionary()->get_chunk(word_size);
1521   if (free_block == NULL) {
1522     return NULL;
1523   }
1524 
1525   const size_t block_size = free_block->size();
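       // Refuse blocks that are much larger than the request (bounded by WasteMultiplier) to
       // limit internal waste; such an oversized block is simply handed back to the freelist.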
1526   if (block_size > WasteMultiplier * word_size) {
1527     return_block((MetaWord*)free_block, block_size);
1528     return NULL;
1529   }
1530 
1531   MetaWord* new_block = (MetaWord*)free_block;
1532   assert(block_size >= word_size, "Incorrect size of block from freelist");
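       // Hand any unused tail back to the freelist. Remainders smaller than the small block
       // minimum cannot be tracked and remain as waste ("dark matter").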
1533   const size_t unused = block_size - word_size;
1534   if (unused >= SmallBlocks::small_block_min_size()) {
1535     return_block(new_block + word_size, unused);
1536   }
1537 
1538   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1539             p2i(new_block), word_size);
1540   return new_block;
1541 }
1542 
1543 void BlockFreelist::print_on(outputStream* st) const {
1544   dictionary()->print_free_lists(st);
1545   if (_small_blocks != NULL) {
1546     _small_blocks->print_on(st);
1547   }
1548 }
1549 
1550 // VirtualSpaceNode methods
1551 
1552 VirtualSpaceNode::~VirtualSpaceNode() {
1553   _rs.release();
1554   if (_occupancy_map != NULL) {
1555     delete _occupancy_map;
1556   }
1557 #ifdef ASSERT
1558   size_t word_size = sizeof(*this) / BytesPerWord;
1559   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1560 #endif
1561 }
1562 
1563 size_t VirtualSpaceNode::used_words_in_vs() const {
1564   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1565 }
1566 
1567 // Space committed in the VirtualSpace
1568 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1569   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1570 }
1571 
1572 size_t VirtualSpaceNode::free_words_in_vs() const {
1573   return pointer_delta(end(), top(), sizeof(MetaWord));
1574 }
1575 
1576 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1577 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1578 
1579   assert(target_top > top(), "Sanity");
1580 
1581   // Padding chunks are added to the freelist.
1582   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1583 
1584   // shorthands
1585   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1586   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1587   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1588 
1589   while (top() < target_top) {
1590 
1591     // We could make this code more generic, but right now we only deal with two possible chunk sizes
1592     // for padding chunks, so it is not worth it.
1593     size_t padding_chunk_word_size = small_word_size;
1594     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1595       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1596       padding_chunk_word_size = spec_word_size;
1597     }
1598     MetaWord* here = top();
1599     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1600     inc_top(padding_chunk_word_size);
1601 
1602     // Create new padding chunk.
1603     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1604     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1605 
1606     Metachunk* const padding_chunk =
1607       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1608     assert(padding_chunk == (Metachunk*)here, "Sanity");
1609     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1610     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1611                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1612                                        (is_class() ? "class space " : "metaspace"),
1613                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1614 
1615     // Mark chunk start in occupancy map.
1616     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1617 
1618     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1619     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1620     // will assert that).
1621     do_update_in_use_info_for_chunk(padding_chunk, true);
1622 
1623     // Return Chunk to freelist.
1624     inc_container_count();
1625     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1626     // Please note: at this point, ChunkManager::return_single_chunk()
1627     // may already have merged the padding chunk with neighboring chunks, so
1628     // it may have vanished at this point. Do not reference the padding
1629     // chunk beyond this point.
1630   }
1631 
1632   assert(top() == target_top, "Sanity");
1633 
1634 } // allocate_padding_chunks_until_top_is_at()
1635 
1636 // Allocates the chunk from the virtual space only.
1637 // This interface is also used internally for debugging.  Not all
1638 // chunks removed here are necessarily used for allocation.
1639 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1640   // Non-humongous chunks are to be allocated aligned to their chunk
1641   // size. So, start addresses of medium chunks are aligned to medium
1642   // chunk size, those of small chunks to small chunk size and so
1643   // forth. This facilitates merging of free chunks and reduces
1644   // fragmentation. Chunk sizes are spec < small < medium, with each
1645   // larger chunk size being a multiple of the next smaller chunk
1646   // size.
1647   // Because of this alignment, we may need to create a number of padding
1648   // chunks. These chunks are created and added to the freelist.
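       // For example (with the usual chunk geometry, in which each chunk size is a multiple of
       // the next smaller one): if a small chunk is requested while top() is only aligned to the
       // specialized chunk size, specialized padding chunks are created first until top() reaches
       // the next small chunk boundary.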
1649 
1650   // The chunk manager to which we will give our padding chunks.
1651   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1652 
1653   // shorthands
1654   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1655   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1656   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1657 
1658   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1659          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1660 
1661   // Chunk alignment (in bytes) == chunk size unless humongous.
1662   // Humongous chunks are aligned to the smallest chunk size (spec).
1663   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1664                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1665 
1666   // Do we have enough space to create the requested chunk plus
1667   // any padding chunks needed?
1668   MetaWord* const next_aligned =
1669     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1670   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1671     return NULL;
1672   }
1673 
1674   // Before allocating the requested chunk, allocate padding chunks if necessary.
1675   // We only need to do this for small or medium chunks: specialized chunks are the
1676   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1677   // (implicitly, also aligned to smallest chunk size).
1678   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1679     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1680         (is_class() ? "class space " : "metaspace"),
1681         top(), next_aligned);
1682     allocate_padding_chunks_until_top_is_at(next_aligned);
1683     // Now, top should be aligned correctly.
1684     assert_is_aligned(top(), required_chunk_alignment);
1685   }
1686 
1687   // Now, top should be aligned correctly.
1688   assert_is_aligned(top(), required_chunk_alignment);
1689 
1690   // Bottom of the new chunk
1691   MetaWord* chunk_limit = top();
1692   assert(chunk_limit != NULL, "Not safe to call this method");
1693 
1694   // The virtual spaces are always expanded by the
1695   // commit granularity to enforce the following condition.
1696   // Without this the is_available check will not work correctly.
1697   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1698       "The committed memory doesn't match the expanded memory.");
1699 
1700   if (!is_available(chunk_word_size)) {
1701     LogTarget(Debug, gc, metaspace, freelist) lt;
1702     if (lt.is_enabled()) {
1703       LogStream ls(lt);
1704       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1705       // Dump some information about the virtual space that is nearly full
1706       print_on(&ls);
1707       ls.cr(); // ~LogStream does not autoflush.
1708     }
1709     return NULL;
1710   }
1711 
1712   // Take the space  (bump top on the current virtual space).
1713   inc_top(chunk_word_size);
1714 
1715   // Initialize the chunk
1716   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1717   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1718   assert(result == (Metachunk*)chunk_limit, "Sanity");
1719   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1720   do_update_in_use_info_for_chunk(result, true);
1721 
1722   inc_container_count();
1723 
1724   if (VerifyMetaspace) {
1725     DEBUG_ONLY(chunk_manager->locked_verify());
1726     DEBUG_ONLY(this->verify());
1727   }
1728 
1729   DEBUG_ONLY(do_verify_chunk(result));
1730 
1731   result->inc_use_count();
1732 
1733   return result;
1734 }
1735 
1736 
1737 // Expand the virtual space (commit more of the reserved space)
1738 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1739   size_t min_bytes = min_words * BytesPerWord;
1740   size_t preferred_bytes = preferred_words * BytesPerWord;
1741 
1742   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1743 
1744   if (uncommitted < min_bytes) {
1745     return false;
1746   }
1747 
1748   size_t commit = MIN2(preferred_bytes, uncommitted);
1749   bool result = virtual_space()->expand_by(commit, false);
1750 
1751   if (result) {
1752     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1753               (is_class() ? "class" : "non-class"), commit);
1754     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
1755   } else {
1756     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1757               (is_class() ? "class" : "non-class"), commit);
1758   }
1759 
1760   assert(result, "Failed to commit memory");
1761 
1762   return result;
1763 }
1764 
1765 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1766   assert_lock_strong(MetaspaceExpand_lock);
1767   Metachunk* result = take_from_committed(chunk_word_size);
1768   return result;
1769 }
1770 
1771 bool VirtualSpaceNode::initialize() {
1772 
1773   if (!_rs.is_reserved()) {
1774     return false;
1775   }
1776 
1777   // These are necessary restrictions to make sure that the virtual space always
1778   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1779   // aligned, only the middle alignment of the VirtualSpace is used.
1780   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1781   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1782 
1783   // ReservedSpaces marked as special will have the entire memory
1784   // pre-committed. Setting a committed size will make sure that
1785   // committed_size and actual_committed_size agree.
1786   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1787 
1788   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1789                                             Metaspace::commit_alignment());
1790   if (result) {
1791     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1792         "Checking that the pre-committed memory was registered by the VirtualSpace");
1793 
1794     set_top((MetaWord*)virtual_space()->low());
1795     set_reserved(MemRegion((HeapWord*)_rs.base(),
1796                  (HeapWord*)(_rs.base() + _rs.size())));
1797 
1798     assert(reserved()->start() == (HeapWord*) _rs.base(),
1799            "Reserved start was not set properly " PTR_FORMAT
1800            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1801     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1802            "Reserved size was not set properly " SIZE_FORMAT
1803            " != " SIZE_FORMAT, reserved()->word_size(),
1804            _rs.size() / BytesPerWord);
1805   }
1806 
1807   // Initialize Occupancy Map.
1808   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1809   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1810 
1811   return result;
1812 }
1813 
1814 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
1815   size_t used_words = used_words_in_vs();
1816   size_t commit_words = committed_words();
1817   size_t res_words = reserved_words();
1818   VirtualSpace* vs = virtual_space();
1819 
1820   st->print("node @" PTR_FORMAT ": ", p2i(this));
1821   st->print("reserved=");
1822   print_scaled_words(st, res_words, scale);
1823   st->print(", committed=");
1824   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
1825   st->print(", used=");
1826   print_scaled_words_and_percentage(st, used_words, res_words, scale);
1827   st->cr();
1828   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
1829            PTR_FORMAT ", " PTR_FORMAT ")",
1830            p2i(bottom()), p2i(top()), p2i(end()),
1831            p2i(vs->high_boundary()));
1832 }
1833 
1834 #ifdef ASSERT
1835 void VirtualSpaceNode::mangle() {
1836   size_t word_size = capacity_words_in_vs();
1837   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1838 }
1839 #endif // ASSERT
1840 
1841 // VirtualSpaceList methods
1842 // Space allocated from the VirtualSpace
1843 
1844 VirtualSpaceList::~VirtualSpaceList() {
1845   VirtualSpaceListIterator iter(virtual_space_list());
1846   while (iter.repeat()) {
1847     VirtualSpaceNode* vsl = iter.get_next();
1848     delete vsl;
1849   }
1850 }
1851 
1852 void VirtualSpaceList::inc_reserved_words(size_t v) {
1853   assert_lock_strong(MetaspaceExpand_lock);
1854   _reserved_words = _reserved_words + v;
1855 }
1856 void VirtualSpaceList::dec_reserved_words(size_t v) {
1857   assert_lock_strong(MetaspaceExpand_lock);
1858   _reserved_words = _reserved_words - v;
1859 }
1860 
1861 #define assert_committed_below_limit()                        \
1862   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1863          "Too much committed memory. Committed: " SIZE_FORMAT \
1864          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1865           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1866 
1867 void VirtualSpaceList::inc_committed_words(size_t v) {
1868   assert_lock_strong(MetaspaceExpand_lock);
1869   _committed_words = _committed_words + v;
1870 
1871   assert_committed_below_limit();
1872 }
1873 void VirtualSpaceList::dec_committed_words(size_t v) {
1874   assert_lock_strong(MetaspaceExpand_lock);
1875   _committed_words = _committed_words - v;
1876 
1877   assert_committed_below_limit();
1878 }
1879 
1880 void VirtualSpaceList::inc_virtual_space_count() {
1881   assert_lock_strong(MetaspaceExpand_lock);
1882   _virtual_space_count++;
1883 }
1884 void VirtualSpaceList::dec_virtual_space_count() {
1885   assert_lock_strong(MetaspaceExpand_lock);
1886   _virtual_space_count--;
1887 }
1888 
1889 void ChunkManager::remove_chunk(Metachunk* chunk) {
1890   size_t word_size = chunk->word_size();
1891   ChunkIndex index = list_index(word_size);
1892   if (index != HumongousIndex) {
1893     free_chunks(index)->remove_chunk(chunk);
1894   } else {
1895     humongous_dictionary()->remove_chunk(chunk);
1896   }
1897 
1898   // Chunk has been removed from the chunks free list, update counters.
1899   account_for_removed_chunk(chunk);
1900 }
1901 
1902 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1903   assert_lock_strong(MetaspaceExpand_lock);
1904   assert(chunk != NULL, "invalid chunk pointer");
1905   // Check for valid merge combinations.
1906   assert((chunk->get_chunk_type() == SpecializedIndex &&
1907           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1908          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1909         "Invalid chunk merge combination.");
1910 
1911   const size_t target_chunk_word_size =
1912     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1913 
1914   // [ prospective merge region )
1915   MetaWord* const p_merge_region_start =
1916     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1917   MetaWord* const p_merge_region_end =
1918     p_merge_region_start + target_chunk_word_size;
1919 
1920   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1921   VirtualSpaceNode* const vsn = chunk->container();
1922   OccupancyMap* const ocmap = vsn->occupancy_map();
1923 
1924   // The prospective chunk merge range must be completely contained by the
1925   // committed range of the virtual space node.
1926   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1927     return false;
1928   }
1929 
1930   // Only attempt to merge this range if at its start a chunk starts and at its end
1931   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1932   // of that range, we cannot merge.
1933   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1934     return false;
1935   }
1936   if (p_merge_region_end < vsn->top() &&
1937       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1938     return false;
1939   }
1940 
1941   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1942   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1943     return false;
1944   }
1945 
1946   // Success! Remove all chunks in this region...
1947   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1948     (is_class() ? "class space" : "metaspace"),
1949     p_merge_region_start, p_merge_region_end);
1950 
1951   const int num_chunks_removed =
1952     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1953 
1954   // ... and create a single new bigger chunk.
1955   Metachunk* const p_new_chunk =
1956       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1957   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1958   p_new_chunk->set_origin(origin_merge);
1959 
1960   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1961     (is_class() ? "class space" : "metaspace"),
1962     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1963 
1964   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1965   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1966   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1967 
1968   // Mark chunk as free. Note: it is not necessary to update the in-use
1969   // information in the occupancy map, because the old chunks were also free,
1970   // so nothing should have changed.
1971   p_new_chunk->set_is_tagged_free(true);
1972 
1973   // Add new chunk to its freelist.
1974   ChunkList* const list = free_chunks(target_chunk_type);
1975   list->return_chunk_at_head(p_new_chunk);
1976 
1977   // And adjust ChunkManager:: _free_chunks_count (_free_chunks_total
1978   // should not have changed, because the size of the space should be the same)
1979   _free_chunks_count -= num_chunks_removed;
1980   _free_chunks_count ++;
1981 
1982   // VirtualSpaceNode::container_count does not have to be modified:
1983   // it means "number of active (non-free) chunks", so merging free chunks
1984   // should not affect that count.
1985 
1986   // At the end of a chunk merge, run verification tests.
1987   if (VerifyMetaspace) {
1988     DEBUG_ONLY(this->locked_verify());
1989     DEBUG_ONLY(vsn->verify());
1990   }
1991 
1992   return true;
1993 }
1994 
1995 // Remove all chunks in the given area - the chunks are supposed to be free -
1996 // from their corresponding freelists. Mark them as invalid.
1997 // - This does not correct the occupancy map.
1998 // - This does not adjust the counters in ChunkManager.
1999 // - This does not adjust the container count of the containing VirtualSpaceNode.
2000 // Returns number of chunks removed.
2001 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
2002   assert(p != NULL && word_size > 0, "Invalid range.");
2003   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
2004   assert_is_aligned(word_size, smallest_chunk_size);
2005 
2006   Metachunk* const start = (Metachunk*) p;
2007   const Metachunk* const end = (Metachunk*)(p + word_size);
2008   Metachunk* cur = start;
2009   int num_removed = 0;
2010   while (cur < end) {
2011     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
2012     DEBUG_ONLY(do_verify_chunk(cur));
2013     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
2014     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
2015     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
2016       (is_class() ? "class space" : "metaspace"),
2017       cur, cur->word_size() * sizeof(MetaWord));
2018     cur->remove_sentinel();
2019     // Note: cannot call ChunkManager::remove_chunk, because that
2020     // modifies the counters in ChunkManager, which we do not want. So
2021     // we call remove_chunk on the freelist directly (see also the
2022     // splitting function which does the same).
2023     ChunkList* const list = free_chunks(list_index(cur->word_size()));
2024     list->remove_chunk(cur);
2025     num_removed ++;
2026     cur = next;
2027   }
2028   return num_removed;
2029 }
2030 
2031 // Walk the list of VirtualSpaceNodes and delete
2032 // nodes with a 0 container_count.  Remove Metachunks in
2033 // the node from their respective freelists.
2034 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2035   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2036   assert_lock_strong(MetaspaceExpand_lock);
2037   // Don't use a VirtualSpaceListIterator because this
2038   // list is being changed and a straightforward use of an iterator is not safe.
2039   VirtualSpaceNode* purged_vsl = NULL;
2040   VirtualSpaceNode* prev_vsl = virtual_space_list();
2041   VirtualSpaceNode* next_vsl = prev_vsl;
2042   while (next_vsl != NULL) {
2043     VirtualSpaceNode* vsl = next_vsl;
2044     DEBUG_ONLY(vsl->verify_container_count();)
2045     next_vsl = vsl->next();
2046     // Don't free the current virtual space since it will likely
2047     // be needed soon.
2048     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2049       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2050                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2051       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
2052       // Unlink it from the list
2053       if (prev_vsl == vsl) {
2054         // This is the case of the current node being the first node.
2055         assert(vsl == virtual_space_list(), "Expected to be the first node");
2056         set_virtual_space_list(vsl->next());
2057       } else {
2058         prev_vsl->set_next(vsl->next());
2059       }
2060 
2061       vsl->purge(chunk_manager);
2062       dec_reserved_words(vsl->reserved_words());
2063       dec_committed_words(vsl->committed_words());
2064       dec_virtual_space_count();
2065       purged_vsl = vsl;
2066       delete vsl;
2067     } else {
2068       prev_vsl = vsl;
2069     }
2070   }
2071 #ifdef ASSERT
2072   if (purged_vsl != NULL) {
2073     // List should be stable enough to use an iterator here.
2074     VirtualSpaceListIterator iter(virtual_space_list());
2075     while (iter.repeat()) {
2076       VirtualSpaceNode* vsl = iter.get_next();
2077       assert(vsl != purged_vsl, "Purge of vsl failed");
2078     }
2079   }
2080 #endif
2081 }
2082 
2083 
2084 // This function looks at the mmap regions in the metaspace without locking.
2085 // The chunks are added with store ordering and are not deleted except at
2086 // unloading time during a safepoint.
2087 bool VirtualSpaceList::contains(const void* ptr) {
2088   // List should be stable enough to use an iterator here because removing virtual
2089   // space nodes is only allowed at a safepoint.
2090   VirtualSpaceListIterator iter(virtual_space_list());
2091   while (iter.repeat()) {
2092     VirtualSpaceNode* vsn = iter.get_next();
2093     if (vsn->contains(ptr)) {
2094       return true;
2095     }
2096   }
2097   return false;
2098 }
2099 
2100 void VirtualSpaceList::retire_current_virtual_space() {
2101   assert_lock_strong(MetaspaceExpand_lock);
2102 
2103   VirtualSpaceNode* vsn = current_virtual_space();
2104 
2105   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2106                                   Metaspace::chunk_manager_metadata();
2107 
2108   vsn->retire(cm);
2109 }
2110 
2111 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2112   DEBUG_ONLY(verify_container_count();)
2113   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
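       // Carve the remaining committed but unused part of this node into free chunks, starting
       // with the largest chunk size, and hand them to the given chunk manager so the space can
       // be reused.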
2114   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2115     ChunkIndex index = (ChunkIndex)i;
2116     size_t chunk_size = chunk_manager->size_by_index(index);
2117 
2118     while (free_words_in_vs() >= chunk_size) {
2119       Metachunk* chunk = get_chunk_vs(chunk_size);
2120       // Chunk will be allocated aligned, so allocation may require
2121       // additional padding chunks. That may cause the above allocation to
2122       // fail. Just ignore the failed allocation and continue with the
2123       // next smaller chunk size. As the VirtualSpaceNode committed
2124       // size should be a multiple of the smallest chunk size, we
2125       // should always be able to fill the VirtualSpace completely.
2126       if (chunk == NULL) {
2127         break;
2128       }
2129       chunk_manager->return_single_chunk(index, chunk);
2130     }
2131     DEBUG_ONLY(verify_container_count();)
2132   }
2133   assert(free_words_in_vs() == 0, "should be empty now");
2134 }
2135 
2136 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2137                                    _is_class(false),
2138                                    _virtual_space_list(NULL),
2139                                    _current_virtual_space(NULL),
2140                                    _reserved_words(0),
2141                                    _committed_words(0),
2142                                    _virtual_space_count(0) {
2143   MutexLockerEx cl(MetaspaceExpand_lock,
2144                    Mutex::_no_safepoint_check_flag);
2145   create_new_virtual_space(word_size);
2146 }
2147 
2148 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2149                                    _is_class(true),
2150                                    _virtual_space_list(NULL),
2151                                    _current_virtual_space(NULL),
2152                                    _reserved_words(0),
2153                                    _committed_words(0),
2154                                    _virtual_space_count(0) {
2155   MutexLockerEx cl(MetaspaceExpand_lock,
2156                    Mutex::_no_safepoint_check_flag);
2157   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2158   bool succeeded = class_entry->initialize();
2159   if (succeeded) {
2160     link_vs(class_entry);
2161   }
2162 }
2163 
2164 size_t VirtualSpaceList::free_bytes() {
2165   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2166 }
2167 
2168 // Allocate another meta virtual space and add it to the list.
2169 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2170   assert_lock_strong(MetaspaceExpand_lock);
2171 
2172   if (is_class()) {
2173     assert(false, "We currently don't support more than one VirtualSpace for"
2174                   " the compressed class space. The initialization of the"
2175                   " CCS uses another code path and should not hit this path.");
2176     return false;
2177   }
2178 
2179   if (vs_word_size == 0) {
2180     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2181     return false;
2182   }
2183 
2184   // Reserve the space
2185   size_t vs_byte_size = vs_word_size * BytesPerWord;
2186   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2187 
2188   // Allocate the meta virtual space and initialize it.
2189   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2190   if (!new_entry->initialize()) {
2191     delete new_entry;
2192     return false;
2193   } else {
2194     assert(new_entry->reserved_words() == vs_word_size,
2195         "Reserved memory size differs from requested memory size");
2196     // ensure lock-free iteration sees fully initialized node
2197     OrderAccess::storestore();
2198     link_vs(new_entry);
2199     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
2200     return true;
2201   }
2202 }
2203 
2204 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2205   if (virtual_space_list() == NULL) {
2206       set_virtual_space_list(new_entry);
2207   } else {
2208     current_virtual_space()->set_next(new_entry);
2209   }
2210   set_current_virtual_space(new_entry);
2211   inc_reserved_words(new_entry->reserved_words());
2212   inc_committed_words(new_entry->committed_words());
2213   inc_virtual_space_count();
2214 #ifdef ASSERT
2215   new_entry->mangle();
2216 #endif
2217   LogTarget(Trace, gc, metaspace) lt;
2218   if (lt.is_enabled()) {
2219     LogStream ls(lt);
2220     VirtualSpaceNode* vsl = current_virtual_space();
2221     ResourceMark rm;
2222     vsl->print_on(&ls);
2223     ls.cr(); // ~LogStream does not autoflush.
2224   }
2225 }
2226 
2227 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2228                                       size_t min_words,
2229                                       size_t preferred_words) {
2230   size_t before = node->committed_words();
2231 
2232   bool result = node->expand_by(min_words, preferred_words);
2233 
2234   size_t after = node->committed_words();
2235 
2236   // after and before can be the same if the memory was pre-committed.
2237   assert(after >= before, "Inconsistency");
2238   inc_committed_words(after - before);
2239 
2240   return result;
2241 }
2242 
2243 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2244   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2245   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2246   assert(min_words <= preferred_words, "Invalid arguments");
2247 
2248   const char* const class_or_not = (is_class() ? "class" : "non-class");
2249 
2250   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2251     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2252               class_or_not);
2253     return  false;
2254   }
2255 
2256   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2257   if (allowed_expansion_words < min_words) {
2258     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2259               class_or_not);
2260     return false;
2261   }
2262 
2263   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2264 
2265   // Commit more memory from the current virtual space.
2266   bool vs_expanded = expand_node_by(current_virtual_space(),
2267                                     min_words,
2268                                     max_expansion_words);
2269   if (vs_expanded) {
2270      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2271                class_or_not);
2272      return true;
2273   }
2274   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2275             class_or_not);
2276   retire_current_virtual_space();
2277 
2278   // Get another virtual space.
2279   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2280   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2281 
2282   if (create_new_virtual_space(grow_vs_words)) {
2283     if (current_virtual_space()->is_pre_committed()) {
2284       // The memory was pre-committed, so we are done here.
2285       assert(min_words <= current_virtual_space()->committed_words(),
2286           "The new VirtualSpace was pre-committed, so it"
2287           "should be large enough to fit the alloc request.");
2288       return true;
2289     }
2290 
2291     return expand_node_by(current_virtual_space(),
2292                           min_words,
2293                           max_expansion_words);
2294   }
2295 
2296   return false;
2297 }
2298 
2299 // Given a chunk, calculate the largest possible padding space which
2300 // could be required when allocating it.
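     // For example, a non-humongous medium chunk must start at a medium-chunk-size boundary, so
     // in the worst case everything between top() and that boundary - up to (medium chunk size
     // minus specialized chunk size) words - has to be filled with padding chunks first.
     // Humongous chunks are only aligned to the specialized chunk size and need no padding.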
2301 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2302   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2303   if (chunk_type != HumongousIndex) {
2304     // Normal, non-humongous chunks are allocated at chunk size
2305     // boundaries, so the largest padding space required would be that
2306     // minus the smallest chunk size.
2307     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2308     return chunk_word_size - smallest_chunk_size;
2309   } else {
2310     // Humongous chunks are allocated at smallest-chunksize
2311     // boundaries, so there is no padding required.
2312     return 0;
2313   }
2314 }
2315 
2316 
2317 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2318 
2319   // Allocate a chunk out of the current virtual space.
2320   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2321 
2322   if (next != NULL) {
2323     return next;
2324   }
2325 
2326   // The expand amount is currently only determined by the requested sizes
2327   // and not how much committed memory is left in the current virtual space.
2328 
2329   // We must have enough space for the requested size and any
2330   // additional required padding chunks.
2331   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2332 
2333   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2334   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2335   if (min_word_size >= preferred_word_size) {
2336     // Can happen when humongous chunks are allocated.
2337     preferred_word_size = min_word_size;
2338   }
2339 
2340   bool expanded = expand_by(min_word_size, preferred_word_size);
2341   if (expanded) {
2342     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2343     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2344   }
2345 
2346   return next;
2347 }
2348 
2349 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
2350   st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
2351       _virtual_space_count, p2i(_current_virtual_space));
2352   VirtualSpaceListIterator iter(virtual_space_list());
2353   while (iter.repeat()) {
2354     st->cr();
2355     VirtualSpaceNode* node = iter.get_next();
2356     node->print_on(st, scale);
2357   }
2358 }
2359 
2360 void VirtualSpaceList::print_map(outputStream* st) const {
2361   VirtualSpaceNode* list = virtual_space_list();
2362   VirtualSpaceListIterator iter(list);
2363   unsigned i = 0;
2364   while (iter.repeat()) {
2365     st->print_cr("Node %u:", i);
2366     VirtualSpaceNode* node = iter.get_next();
2367     node->print_map(st, this->is_class());
2368     i ++;
2369   }
2370 }
2371 
2372 // MetaspaceGC methods
2373 
2374 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2375 // Within the VM operation after the GC the attempt to allocate the metadata
2376 // should succeed.  If the GC did not free enough space for the metaspace
2377 // allocation, the HWM is increased so that another virtualspace will be
2378 // allocated for the metadata.  With perm gen the increase in the perm
2379 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2380 // metaspace policy uses those as the small and large steps for the HWM.
2381 //
2382 // After the GC the compute_new_size() for MetaspaceGC is called to
2383 // resize the capacity of the metaspaces.  The current implementation
2384 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2385 // to resize the Java heap by some GC's.  New flags can be implemented
2386 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2387 // free space is desirable in the metaspace capacity to decide how much
2388 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2389 // free space is desirable in the metaspace capacity before decreasing
2390 // the HWM.
2391 
2392 // Calculate the amount to increase the high water mark (HWM).
2393 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2394 // another expansion is not requested too soon.  If that is not
2395 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2396 // If that is still not enough, expand by the size of the allocation
2397 // plus some.
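     // Worked example: if MinMetaspaceExpansion were 256K and MaxMetaspaceExpansion 4M, a 10K
     // request would raise the HWM by 256K, a 1M request by 4M, and a 6M request by roughly
     // 6M plus 256K (each value rounded up to the commit alignment).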
2398 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2399   size_t min_delta = MinMetaspaceExpansion;
2400   size_t max_delta = MaxMetaspaceExpansion;
2401   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2402 
2403   if (delta <= min_delta) {
2404     delta = min_delta;
2405   } else if (delta <= max_delta) {
2406     // Don't want to hit the high water mark on the next
2407     // allocation so make the delta greater than just enough
2408     // for this allocation.
2409     delta = max_delta;
2410   } else {
2411     // This allocation is large but the next ones are probably not
2412     // so increase by the minimum.
2413     delta = delta + min_delta;
2414   }
2415 
2416   assert_is_aligned(delta, Metaspace::commit_alignment());
2417 
2418   return delta;
2419 }
2420 
2421 size_t MetaspaceGC::capacity_until_GC() {
2422   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2423   assert(value >= MetaspaceSize, "Not initialized properly?");
2424   return value;
2425 }
2426 
2427 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2428   assert_is_aligned(v, Metaspace::commit_alignment());
2429 
2430   intptr_t capacity_until_GC = _capacity_until_GC;
2431   intptr_t new_value = capacity_until_GC + v;
2432 
2433   if (new_value < capacity_until_GC) {
2434     // The addition wrapped around, set new_value to aligned max value.
2435     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2436   }
2437 
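       // Install the new value with a single compare-and-swap; if another thread changed
       // _capacity_until_GC concurrently, report failure and let the caller retry with a
       // freshly read capacity.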
2438   intptr_t expected = _capacity_until_GC;
2439   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2440 
2441   if (expected != actual) {
2442     return false;
2443   }
2444 
2445   if (new_cap_until_GC != NULL) {
2446     *new_cap_until_GC = new_value;
2447   }
2448   if (old_cap_until_GC != NULL) {
2449     *old_cap_until_GC = capacity_until_GC;
2450   }
2451   return true;
2452 }
2453 
2454 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2455   assert_is_aligned(v, Metaspace::commit_alignment());
2456 
2457   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2458 }
2459 
2460 void MetaspaceGC::initialize() {
2461   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2462   // we can't do a GC during initialization.
2463   _capacity_until_GC = MaxMetaspaceSize;
2464 }
2465 
2466 void MetaspaceGC::post_initialize() {
2467   // Reset the high-water mark once the VM initialization is done.
2468   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2469 }
2470 
2471 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2472   // Check if the compressed class space is full.
2473   if (is_class && Metaspace::using_class_space()) {
2474     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2475     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2476       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2477                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2478       return false;
2479     }
2480   }
2481 
2482   // Check if the user has imposed a limit on the metaspace memory.
2483   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2484   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2485     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2486               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2487     return false;
2488   }
2489 
2490   return true;
2491 }
2492 
2493 size_t MetaspaceGC::allowed_expansion() {
2494   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2495   size_t capacity_until_gc = capacity_until_GC();
2496 
2497   assert(capacity_until_gc >= committed_bytes,
2498          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2499          capacity_until_gc, committed_bytes);
2500 
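       // Expansion is bounded both by the current GC threshold (capacity_until_GC) and by the
       // hard MaxMetaspaceSize limit; report the smaller of the two remaining headrooms.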
2501   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2502   size_t left_until_GC = capacity_until_gc - committed_bytes;
2503   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2504   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2505             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2506             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2507 
2508   return left_to_commit / BytesPerWord;
2509 }
2510 
2511 void MetaspaceGC::compute_new_size() {
2512   assert(_shrink_factor <= 100, "invalid shrink factor");
2513   uint current_shrink_factor = _shrink_factor;
2514   _shrink_factor = 0;
2515 
2516   // Using committed_bytes() for used_after_gc is an overestimation, since the
2517   // chunk free lists are included in committed_bytes() and the memory in an
2518   // un-fragmented chunk free list is available for future allocations.
2519   // However, if the chunk free lists becomes fragmented, then the memory may
2520   // not be available for future allocations and the memory is therefore "in use".
2521   // Including the chunk free lists in the definition of "in use" is therefore
2522   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2523   // shrink below committed_bytes() and this has caused serious bugs in the past.
2524   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2525   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2526 
2527   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2528   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2529 
2530   const double min_tmp = used_after_gc / maximum_used_percentage;
2531   size_t minimum_desired_capacity =
2532     (size_t)MIN2(min_tmp, double(max_uintx));
2533   // Don't shrink less than the initial generation size
2534   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2535                                   MetaspaceSize);
2536 
2537   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2538   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2539                            minimum_free_percentage, maximum_used_percentage);
2540   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2541 
2542 
2543   size_t shrink_bytes = 0;
2544   if (capacity_until_GC < minimum_desired_capacity) {
2545     // If we have less capacity below the metaspace HWM, then
2546     // increment the HWM.
2547     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2548     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2549     // Don't expand unless it's significant
2550     if (expand_bytes >= MinMetaspaceExpansion) {
2551       size_t new_capacity_until_GC = 0;
2552       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2553       assert(succeeded, "Should always successfully increment HWM when at safepoint");
2554 
2555       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2556                                                new_capacity_until_GC,
2557                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2558       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2559                                minimum_desired_capacity / (double) K,
2560                                expand_bytes / (double) K,
2561                                MinMetaspaceExpansion / (double) K,
2562                                new_capacity_until_GC / (double) K);
2563     }
2564     return;
2565   }
2566 
2567   // No expansion, now see if we want to shrink
2568   // We would never want to shrink more than this
2569   assert(capacity_until_GC >= minimum_desired_capacity,
2570          SIZE_FORMAT " >= " SIZE_FORMAT,
2571          capacity_until_GC, minimum_desired_capacity);
2572   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2573 
2574   // Should shrinking be considered?
2575   if (MaxMetaspaceFreeRatio < 100) {
2576     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2577     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2578     const double max_tmp = used_after_gc / minimum_used_percentage;
2579     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2580     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2581                                     MetaspaceSize);
2582     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2583                              maximum_free_percentage, minimum_used_percentage);
2584     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2585                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2586 
2587     assert(minimum_desired_capacity <= maximum_desired_capacity,
2588            "sanity check");
2589 
2590     if (capacity_until_GC > maximum_desired_capacity) {
2591       // Capacity too large, compute shrinking size
2592       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2593       // We don't want to shrink all the way back to initSize if people call
2594       // System.gc(), because some programs do that between "phases" and then
2595       // we'd just have to grow the heap up again for the next phase.  So we
2596       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2597       // on the third call, and 100% by the fourth call.  But if we recompute
2598       // size without shrinking, it goes back to 0%.
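           // For example, if the computed excess is 100M, consecutive shrink attempts would cut
           // 0, 10M, 40M and then the full 100M.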
2599       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2600 
2601       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2602 
2603       assert(shrink_bytes <= max_shrink_bytes,
2604              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2605              shrink_bytes, max_shrink_bytes);
2606       if (current_shrink_factor == 0) {
2607         _shrink_factor = 10;
2608       } else {
2609         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2610       }
2611       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2612                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2613       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2614                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2615     }
2616   }
2617 
2618   // Don't shrink unless it's significant
2619   if (shrink_bytes >= MinMetaspaceExpansion &&
2620       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2621     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2622     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2623                                              new_capacity_until_GC,
2624                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2625   }
2626 }
2627 
2628 // Metadebug methods
2629 
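     // Initializes a pseudo-random countdown in the range [1, MetadataAllocationFailALotInterval].
     // test_metadata_failure() (debug only) decrements this countdown on each metadata allocation
     // and simulates an allocation failure when it reaches zero, then re-arms the countdown.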
2630 void Metadebug::init_allocation_fail_alot_count() {
2631   if (MetadataAllocationFailALot) {
2632     _allocation_fail_alot_count =
2633       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2634   }
2635 }
2636 
2637 #ifdef ASSERT
2638 bool Metadebug::test_metadata_failure() {
2639   if (MetadataAllocationFailALot &&
2640       Threads::is_vm_complete()) {
2641     if (_allocation_fail_alot_count > 0) {
2642       _allocation_fail_alot_count--;
2643     } else {
2644       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2645       init_allocation_fail_alot_count();
2646       return true;
2647     }
2648   }
2649   return false;
2650 }
2651 #endif
2652 
2653 // ChunkManager methods
2654 size_t ChunkManager::free_chunks_total_words() {
2655   return _free_chunks_total;
2656 }
2657 
2658 size_t ChunkManager::free_chunks_total_bytes() {
2659   return free_chunks_total_words() * BytesPerWord;
2660 }
2661 
2662 // Update internal accounting after a chunk was added
2663 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2664   assert_lock_strong(MetaspaceExpand_lock);
2665   _free_chunks_count ++;
2666   _free_chunks_total += c->word_size();
2667 }
2668 
2669 // Update internal accounting after a chunk was removed
2670 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2671   assert_lock_strong(MetaspaceExpand_lock);
2672   assert(_free_chunks_count >= 1,
2673     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2674   assert(_free_chunks_total >= c->word_size(),
2675     "ChunkManager::_free_chunks_total: about to go negative "
2676      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2677   _free_chunks_count --;
2678   _free_chunks_total -= c->word_size();
2679 }
2680 
2681 size_t ChunkManager::free_chunks_count() {
2682 #ifdef ASSERT
2683   if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
2684     MutexLockerEx cl(MetaspaceExpand_lock,
2685                      Mutex::_no_safepoint_check_flag);
2686     // This lock is only needed in debug because the verification
2687     // of _free_chunks_count walks the list of free chunks
2688     slow_locked_verify_free_chunks_count();
2689   }
2690 #endif
2691   return _free_chunks_count;
2692 }
2693 
2694 ChunkIndex ChunkManager::list_index(size_t size) {
2695   return get_chunk_type_by_size(size, is_class());
2696 }
2697 
2698 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2699   index_bounds_check(index);
2700   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2701   return get_size_for_nonhumongous_chunktype(index, is_class());
2702 }
2703 
2704 void ChunkManager::locked_verify_free_chunks_total() {
2705   assert_lock_strong(MetaspaceExpand_lock);
2706   assert(sum_free_chunks() == _free_chunks_total,
2707          "_free_chunks_total " SIZE_FORMAT " is not the"
2708          " same as sum " SIZE_FORMAT, _free_chunks_total,
2709          sum_free_chunks());
2710 }
2711 
2712 void ChunkManager::verify_free_chunks_total() {
2713   MutexLockerEx cl(MetaspaceExpand_lock,
2714                      Mutex::_no_safepoint_check_flag);
2715   locked_verify_free_chunks_total();
2716 }
2717 
2718 void ChunkManager::locked_verify_free_chunks_count() {
2719   assert_lock_strong(MetaspaceExpand_lock);
2720   assert(sum_free_chunks_count() == _free_chunks_count,
2721          "_free_chunks_count " SIZE_FORMAT " is not the"
2722          " same as sum " SIZE_FORMAT, _free_chunks_count,
2723          sum_free_chunks_count());
2724 }
2725 
2726 void ChunkManager::verify_free_chunks_count() {
2727 #ifdef ASSERT
2728   MutexLockerEx cl(MetaspaceExpand_lock,
2729                      Mutex::_no_safepoint_check_flag);
2730   locked_verify_free_chunks_count();
2731 #endif
2732 }
2733 
2734 void ChunkManager::verify() {
2735   MutexLockerEx cl(MetaspaceExpand_lock,
2736                      Mutex::_no_safepoint_check_flag);
2737   locked_verify();
2738 }
2739 
2740 void ChunkManager::locked_verify() {
2741   locked_verify_free_chunks_count();
2742   locked_verify_free_chunks_total();
2743   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2744     ChunkList* list = free_chunks(i);
2745     if (list != NULL) {
2746       Metachunk* chunk = list->head();
2747       while (chunk) {
2748         DEBUG_ONLY(do_verify_chunk(chunk);)
2749         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2750         chunk = chunk->next();
2751       }
2752     }
2753   }
2754 }
2755 
2756 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2757   assert_lock_strong(MetaspaceExpand_lock);
2758   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2759                 _free_chunks_total, _free_chunks_count);
2760 }
2761 
2762 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2763   assert_lock_strong(MetaspaceExpand_lock);
2764   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2765                 sum_free_chunks(), sum_free_chunks_count());
2766 }
2767 
2768 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2769   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2770          "Bad index: %d", (int)index);
2771 
2772   return &_free_chunks[index];
2773 }
2774 
2775 // These methods, which sum the free chunk lists, are used by printing
2776 // methods that run in product builds.
2777 size_t ChunkManager::sum_free_chunks() {
2778   assert_lock_strong(MetaspaceExpand_lock);
2779   size_t result = 0;
2780   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2781     ChunkList* list = free_chunks(i);
2782 
2783     if (list == NULL) {
2784       continue;
2785     }
2786 
2787     result = result + list->count() * list->size();
2788   }
2789   result = result + humongous_dictionary()->total_size();
2790   return result;
2791 }
2792 
2793 size_t ChunkManager::sum_free_chunks_count() {
2794   assert_lock_strong(MetaspaceExpand_lock);
2795   size_t count = 0;
2796   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2797     ChunkList* list = free_chunks(i);
2798     if (list == NULL) {
2799       continue;
2800     }
2801     count = count + list->count();
2802   }
2803   count = count + humongous_dictionary()->total_free_blocks();
2804   return count;
2805 }
2806 
2807 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2808   ChunkIndex index = list_index(word_size);
2809   assert(index < HumongousIndex, "No humongous list");
2810   return free_chunks(index);
2811 }
2812 
2813 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2814 // split up the larger chunk into n smaller chunks, at least one of which should be
2815 // a chunk of the target chunk size. The smaller chunks, including the target
2816 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
2817 // Note that this chunk is supposed to be removed from the freelist right away.
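     // Illustrative example (chunk word sizes are placeholders, not taken from this file):
     // with non-class chunk sizes specialized=128, small=512 and medium=8K words, splitting a
     // medium chunk to obtain a specialized chunk yields the target chunk at the region start,
     // three more specialized chunks up to the next small-chunk-aligned address, and then
     // small chunks filling the rest of the region, so the remainder chunks are as large
     // (as "coalesced") as the alignment rules allow.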
2818 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2819   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2820 
2821   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2822   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2823 
2824   MetaWord* const region_start = (MetaWord*)larger_chunk;
2825   const size_t region_word_len = larger_chunk->word_size();
2826   MetaWord* const region_end = region_start + region_word_len;
2827   VirtualSpaceNode* const vsn = larger_chunk->container();
2828   OccupancyMap* const ocmap = vsn->occupancy_map();
2829 
2830   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2831   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2832   // at an address suitable to place the smaller target chunk.
2833   assert_is_aligned(region_start, target_chunk_word_size);
2834 
2835   // Remove old chunk.
2836   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2837   larger_chunk->remove_sentinel();
2838 
2839   // Prevent access to the old chunk from here on.
2840   larger_chunk = NULL;
2841   // ... and wipe it.
2842   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2843 
2844   // In its place create first the target chunk...
2845   MetaWord* p = region_start;
2846   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2847   assert(target_chunk == (Metachunk*)p, "Sanity");
2848   target_chunk->set_origin(origin_split);
2849 
2850   // Note: we do not need to mark its start in the occupancy map
2851   // because it coincides with the old chunk start.
2852 
2853   // Mark chunk as free and return to the freelist.
2854   do_update_in_use_info_for_chunk(target_chunk, false);
2855   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2856 
2857   // This chunk should now be valid and can be verified.
2858   DEBUG_ONLY(do_verify_chunk(target_chunk));
2859 
2860   // In the remaining space create the remainder chunks.
2861   p += target_chunk->word_size();
2862   assert(p < region_end, "Sanity");
2863 
2864   while (p < region_end) {
2865 
2866     // Find the largest chunk size which fits the alignment requirements at address p.
2867     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2868     size_t this_chunk_word_size = 0;
2869     for(;;) {
2870       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2871       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2872         break;
2873       } else {
2874         this_chunk_index = prev_chunk_index(this_chunk_index);
2875         assert(this_chunk_index >= target_chunk_index, "Sanity");
2876       }
2877     }
2878 
2879     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2880     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2881     assert(p + this_chunk_word_size <= region_end, "Sanity");
2882 
2883     // Create splitting chunk.
2884     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2885     assert(this_chunk == (Metachunk*)p, "Sanity");
2886     this_chunk->set_origin(origin_split);
2887     ocmap->set_chunk_starts_at_address(p, true);
2888     do_update_in_use_info_for_chunk(this_chunk, false);
2889 
2890     // This chunk should be valid and can be verified.
2891     DEBUG_ONLY(do_verify_chunk(this_chunk));
2892 
2893     // Return this chunk to freelist and correct counter.
2894     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2895     _free_chunks_count ++;
2896 
2897     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2898       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2899       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2900       p2i(region_start), p2i(region_end));
2901 
2902     p += this_chunk_word_size;
2903 
2904   }
2905 
2906   return target_chunk;
2907 }
2908 
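     // Takes a free chunk of the given word size from the free lists (or, for humongous
     // sizes, from the humongous dictionary). If the matching free list is empty, a larger
     // free chunk is split (see split_chunk() above) to produce a chunk of the requested
     // size. Returns NULL if no suitable chunk could be found. Expects MetaspaceExpand_lock
     // to be held.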
2909 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2910   assert_lock_strong(MetaspaceExpand_lock);
2911 
2912   slow_locked_verify();
2913 
2914   Metachunk* chunk = NULL;
2915   bool we_did_split_a_chunk = false;
2916 
2917   if (list_index(word_size) != HumongousIndex) {
2918 
2919     ChunkList* free_list = find_free_chunks_list(word_size);
2920     assert(free_list != NULL, "Sanity check");
2921 
2922     chunk = free_list->head();
2923 
2924     if (chunk == NULL) {
2925       // There is no free chunk of the requested size. Try to split a larger free chunk
2926       // into smaller ones. This is the counterpart of the coalescing-upon-chunk-return.
2927 
2928       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2929 
2930       // Is there a larger chunk we could split?
2931       Metachunk* larger_chunk = NULL;
2932       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2933       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2934         larger_chunk = free_chunks(larger_chunk_index)->head();
2935         if (larger_chunk == NULL) {
2936           larger_chunk_index = next_chunk_index(larger_chunk_index);
2937         }
2938       }
2939 
2940       if (larger_chunk != NULL) {
2941         assert(larger_chunk->word_size() > word_size, "Sanity");
2942         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2943 
2944         // We found a larger chunk. Let's split it up:
2945         // - remove old chunk
2946         // - in its place, create new smaller chunks, with at least one chunk
2947         //   being of target size, the others sized as large as possible. This
2948         //   is to make sure the resulting chunks are "as coalesced as possible"
2949         //   (similar to VirtualSpaceNode::retire()).
2950         // Note: during this operation both ChunkManager and VirtualSpaceNode
2951         //  are temporarily invalid, so be careful with asserts.
2952 
2953         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2954            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2955           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2956           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2957 
2958         chunk = split_chunk(word_size, larger_chunk);
2959 
2960         // This should have worked.
2961         assert(chunk != NULL, "Sanity");
2962         assert(chunk->word_size() == word_size, "Sanity");
2963         assert(chunk->is_tagged_free(), "Sanity");
2964 
2965         we_did_split_a_chunk = true;
2966 
2967       }
2968     }
2969 
2970     if (chunk == NULL) {
2971       return NULL;
2972     }
2973 
2974     // Remove the chunk as the head of the list.
2975     free_list->remove_chunk(chunk);
2976 
2977     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2978                                        p2i(free_list), free_list->count());
2979 
2980   } else {
2981     chunk = humongous_dictionary()->get_chunk(word_size);
2982 
2983     if (chunk == NULL) {
2984       return NULL;
2985     }
2986 
2987     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2988                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2989   }
2990 
2991   // Chunk has been removed from the chunk manager; update counters.
2992   account_for_removed_chunk(chunk);
2993   do_update_in_use_info_for_chunk(chunk, true);
2994   chunk->container()->inc_container_count();
2995   chunk->inc_use_count();
2996 
2997   // Remove it from the links to this freelist
2998   chunk->set_next(NULL);
2999   chunk->set_prev(NULL);
3000 
3001   // Run some verifications (some more if we did a chunk split)
3002 #ifdef ASSERT
3003   if (VerifyMetaspace) {
3004     locked_verify();
3005     VirtualSpaceNode* const vsn = chunk->container();
3006     vsn->verify();
3007     if (we_did_split_a_chunk) {
3008       vsn->verify_free_chunks_are_ideally_merged();
3009     }
3010   }
3011 #endif
3012 
3013   return chunk;
3014 }
3015 
3016 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
3017   assert_lock_strong(MetaspaceExpand_lock);
3018   slow_locked_verify();
3019 
3020   // Take from the beginning of the list
3021   Metachunk* chunk = free_chunks_get(word_size);
3022   if (chunk == NULL) {
3023     return NULL;
3024   }
3025 
3026   assert((word_size <= chunk->word_size()) ||
3027          (list_index(chunk->word_size()) == HumongousIndex),
3028          "Non-humongous variable sized chunk");
3029   LogTarget(Debug, gc, metaspace, freelist) lt;
3030   if (lt.is_enabled()) {
3031     size_t list_count;
3032     if (list_index(word_size) < HumongousIndex) {
3033       ChunkList* list = find_free_chunks_list(word_size);
3034       list_count = list->count();
3035     } else {
3036       list_count = humongous_dictionary()->total_count();
3037     }
3038     LogStream ls(lt);
3039     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3040              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3041     ResourceMark rm;
3042     locked_print_free_chunks(&ls);
3043     ls.cr(); // ~LogStream does not autoflush.
3044   }
3045 
3046   return chunk;
3047 }
3048 
3049 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3050   assert_lock_strong(MetaspaceExpand_lock);
3051   assert(chunk != NULL, "Expected chunk.");
3052   DEBUG_ONLY(do_verify_chunk(chunk);)
3053   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3054   assert(chunk->container() != NULL, "Container should have been set.");
3055   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3056   index_bounds_check(index);
3057 
3058   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3059   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3060   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3061   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3062 
3063   if (index != HumongousIndex) {
3064     // Return non-humongous chunk to freelist.
3065     ChunkList* list = free_chunks(index);
3066     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3067     list->return_chunk_at_head(chunk);
3068     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3069         chunk_size_name(index), p2i(chunk));
3070   } else {
3071     // Return humongous chunk to dictionary.
3072     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3073     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3074            "Humongous chunk has wrong alignment.");
3075     _humongous_dictionary.return_chunk(chunk);
3076     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3077         chunk_size_name(index), p2i(chunk), chunk->word_size());
3078   }
3079   chunk->container()->dec_container_count();
3080   do_update_in_use_info_for_chunk(chunk, false);
3081 
3082   // Chunk has been added; update counters.
3083   account_for_added_chunk(chunk);
3084 
3085   // Attempt to coalesce the returned chunk with its neighboring chunks:
3086   // if this chunk is small or specialized, attempt to coalesce into a medium chunk.
3087   if (index == SmallIndex || index == SpecializedIndex) {
3088     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3089       // This did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3090       if (index == SpecializedIndex) {
3091         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3092           // give up.
3093         }
3094       }
3095     }
3096   }
3097 
3098 }
3099 
3100 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3101   index_bounds_check(index);
3102   if (chunks == NULL) {
3103     return;
3104   }
3105   LogTarget(Trace, gc, metaspace, freelist) log;
3106   if (log.is_enabled()) { // tracing
3107     log.print("returning list of %s chunks...", chunk_size_name(index));
3108   }
3109   unsigned num_chunks_returned = 0;
3110   size_t size_chunks_returned = 0;
3111   Metachunk* cur = chunks;
3112   while (cur != NULL) {
3113     // Capture the next link before it is changed
3114     // by the call to return_chunk_at_head();
3115     Metachunk* next = cur->next();
3116     if (log.is_enabled()) { // tracing
3117       num_chunks_returned ++;
3118       size_chunks_returned += cur->word_size();
3119     }
3120     return_single_chunk(index, cur);
3121     cur = next;
3122   }
3123   if (log.is_enabled()) { // tracing
3124     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3125         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3126     if (index != HumongousIndex) {
3127       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3128     } else {
3129       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3130     }
3131   }
3132 }
3133 
3134 void ChunkManager::print_on(outputStream* out) const {
3135   _humongous_dictionary.report_statistics(out);
3136 }
3137 
3138 void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
3139   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3140   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3141     out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
3142   }
3143 }
3144 
3145 // SpaceManager methods
3146 
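     // Rounds a requested initial chunk size up to the next fixed chunk size
     // (specialized, small or medium); anything larger is returned unchanged and
     // will be handled as a humongous chunk. For example (non-class space,
     // illustrative sizes), a request of 300 words would be rounded up to the
     // small chunk size, while a request larger than the medium chunk size is
     // returned as-is.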
3147 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3148   size_t chunk_sizes[] = {
3149       specialized_chunk_size(is_class_space),
3150       small_chunk_size(is_class_space),
3151       medium_chunk_size(is_class_space)
3152   };
3153 
3154   // Adjust up to one of the fixed chunk sizes ...
3155   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3156     if (requested <= chunk_sizes[i]) {
3157       return chunk_sizes[i];
3158     }
3159   }
3160 
3161   // ... or return the size as a humongous chunk.
3162   return requested;
3163 }
3164 
3165 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3166   return adjust_initial_chunk_size(requested, is_class());
3167 }
3168 
3169 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3170   size_t requested;
3171 
3172   if (is_class()) {
3173     switch (type) {
3174     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3175     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3176     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3177     default:                                 requested = ClassSmallChunk; break;
3178     }
3179   } else {
3180     switch (type) {
3181     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3182     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3183     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3184     default:                                 requested = SmallChunk; break;
3185     }
3186   }
3187 
3188   // Adjust to one of the fixed chunk sizes (unless humongous)
3189   const size_t adjusted = adjust_initial_chunk_size(requested);
3190 
3191   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3192          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3193 
3194   return adjusted;
3195 }
3196 
3197 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3198   size_t count = 0;
3199   Metachunk* chunk = chunks_in_use(i);
3200   while (chunk != NULL) {
3201     count++;
3202     chunk = chunk->next();
3203   }
3204   return count;
3205 }
3206 
3207 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3208 
3209   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3210     Metachunk* chunk = chunks_in_use(i);
3211     st->print("SpaceManager: %s " PTR_FORMAT,
3212                  chunk_size_name(i), p2i(chunk));
3213     if (chunk != NULL) {
3214       st->print_cr(" free " SIZE_FORMAT,
3215                    chunk->free_word_size());
3216     } else {
3217       st->cr();
3218     }
3219   }
3220 
3221   chunk_manager()->locked_print_free_chunks(st);
3222   chunk_manager()->locked_print_sum_free_chunks(st);
3223 }
3224 
3225 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3226 
3227   // Decide between a small chunk and a medium chunk.  Up to
3228   // _small_chunk_limit small chunks can be allocated.
3229   // After that a medium chunk is preferred.
3230   size_t chunk_word_size;
3231 
3232   // Special case for anonymous metadata space.
3233   // Anonymous metadata space is usually small, with the majority within the 1K - 2K range and
3234   // rarely around 4K (64-bit JVM).
3235   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping allocations
3236   // in SpecializedChunk-sized chunks up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3237   // reduces space waste from 60+% to around 30%.
3238   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3239       _mdtype == Metaspace::NonClassType &&
3240       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3241       word_size + Metachunk::overhead() <= SpecializedChunk) {
3242     return SpecializedChunk;
3243   }
3244 
3245   if (chunks_in_use(MediumIndex) == NULL &&
3246       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3247     chunk_word_size = (size_t) small_chunk_size();
3248     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3249       chunk_word_size = medium_chunk_size();
3250     }
3251   } else {
3252     chunk_word_size = medium_chunk_size();
3253   }
3254 
3255   // Might still need a humongous chunk.  Enforce
3256   // humongous allocation sizes to be aligned up to
3257   // the smallest chunk size.
3258   size_t if_humongous_sized_chunk =
3259     align_up(word_size + Metachunk::overhead(),
3260                   smallest_chunk_size());
3261   chunk_word_size =
3262     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3263 
3264   assert(!SpaceManager::is_humongous(word_size) ||
3265          chunk_word_size == if_humongous_sized_chunk,
3266          "Size calculation is wrong, word_size " SIZE_FORMAT
3267          " chunk_word_size " SIZE_FORMAT,
3268          word_size, chunk_word_size);
3269   Log(gc, metaspace, alloc) log;
3270   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3271     log.debug("Metadata humongous allocation:");
3272     log.debug("  word_size " PTR_FORMAT, word_size);
3273     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3274     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3275   }
3276   return chunk_word_size;
3277 }
3278 
3279 void SpaceManager::track_metaspace_memory_usage() {
3280   if (is_init_completed()) {
3281     if (is_class()) {
3282       MemoryService::track_compressed_class_memory_usage();
3283     }
3284     MemoryService::track_metaspace_memory_usage();
3285   }
3286 }
3287 
3288 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3289   assert_lock_strong(_lock);
3290   assert(vs_list()->current_virtual_space() != NULL,
3291          "Should have been set");
3292   assert(current_chunk() == NULL ||
3293          current_chunk()->allocate(word_size) == NULL,
3294          "Don't need to expand");
3295   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3296 
3297   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3298     size_t words_left = 0;
3299     size_t words_used = 0;
3300     if (current_chunk() != NULL) {
3301       words_left = current_chunk()->free_word_size();
3302       words_used = current_chunk()->used_word_size();
3303     }
3304     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3305                                        word_size, words_used, words_left);
3306   }
3307 
3308   // Get another chunk
3309   size_t chunk_word_size = calc_chunk_size(word_size);
3310   Metachunk* next = get_new_chunk(chunk_word_size);
3311 
3312   MetaWord* mem = NULL;
3313 
3314   // If a chunk was available, add it to the in-use chunk list
3315   // and do an allocation from it.
3316   if (next != NULL) {
3317     // Add to this manager's list of chunks in use.
3318     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3319     // case it usually makes no sense to make it the current chunk, since the next allocation would
3320     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3321     // good chunk which could be used for more normal allocations.
3322     bool make_current = true;
3323     if (next->get_chunk_type() == HumongousIndex &&
3324         current_chunk() != NULL) {
3325       make_current = false;
3326     }
3327     add_chunk(next, make_current);
3328     mem = next->allocate(word_size);
3329   }
3330 
3331   // Track metaspace memory usage statistic.
3332   track_metaspace_memory_usage();
3333 
3334   return mem;
3335 }
3336 
3337 void SpaceManager::print_on(outputStream* st) const {
3338   SpaceManagerStatistics stat;
3339   add_to_statistics(&stat); // will lock _lock.
3340   stat.print_on(st, 1*K, false);
3341 }
3342 
3343 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3344                            Metaspace::MetaspaceType space_type,
3345                            Mutex* lock) :
3346   _mdtype(mdtype),
3347   _space_type(space_type),
3348   _capacity_words(0),
3349   _used_words(0),
3350   _overhead_words(0),
3351   _block_freelists(NULL),
3352   _lock(lock)
3353 {
3354   initialize();
3355 }
3356 
3357 void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {
3358 
3359   assert_lock_strong(MetaspaceExpand_lock);
3360 
3361   _capacity_words += new_chunk->word_size();
3362   _overhead_words += Metachunk::overhead();
3363 
3364   // Adjust global counters:
3365   MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
3366   MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
3367 }
3368 
3369 void SpaceManager::account_for_allocation(size_t words) {
3370   // Note: we should be locked with the ClassloaderData-specific metaspace lock.
3371   // We may or may not be locked with the global metaspace expansion lock.
3372   assert_lock_strong(lock());
3373 
3374   // Add to the per SpaceManager totals. This can be done non-atomically.
3375   _used_words += words;
3376 
3377   // Adjust global counters. This will be done atomically.
3378   MetaspaceUtils::inc_used(mdtype(), words);
3379 }
3380 
3381 void SpaceManager::account_for_spacemanager_death() {
3382 
3383   assert_lock_strong(MetaspaceExpand_lock);
3384 
3385   MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
3386   MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
3387   MetaspaceUtils::dec_used(mdtype(), _used_words);
3388 }
3389 
3390 void SpaceManager::initialize() {
3391   Metadebug::init_allocation_fail_alot_count();
3392   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3393     _chunks_in_use[i] = NULL;
3394   }
3395   _current_chunk = NULL;
3396   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3397 }
3398 
3399 SpaceManager::~SpaceManager() {
3400 
3401   // This takes this->_lock, which can't be done while holding MetaspaceExpand_lock.
3402   DEBUG_ONLY(verify_metrics());
3403 
3404   MutexLockerEx fcl(MetaspaceExpand_lock,
3405                     Mutex::_no_safepoint_check_flag);
3406 
3407   chunk_manager()->slow_locked_verify();
3408 
3409   account_for_spacemanager_death();
3410 
3411   Log(gc, metaspace, freelist) log;
3412   if (log.is_trace()) {
3413     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3414     ResourceMark rm;
3415     LogStream ls(log.trace());
3416     locked_print_chunks_in_use_on(&ls);
3417     if (block_freelists() != NULL) {
3418       block_freelists()->print_on(&ls);
3419     }
3420     ls.cr(); // ~LogStream does not autoflush.
3421   }
3422 
3423   // Add all the chunks in use by this space manager
3424   // to the global list of free chunks.
3425 
3426   // Follow each list of chunks-in-use and add them to the
3427   // free lists.  Each list is NULL terminated.
3428 
3429   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3430     Metachunk* chunks = chunks_in_use(i);
3431     chunk_manager()->return_chunk_list(i, chunks);
3432     set_chunks_in_use(i, NULL);
3433   }
3434 
3435   chunk_manager()->slow_locked_verify();
3436 
3437   if (_block_freelists != NULL) {
3438     delete _block_freelists;
3439   }
3440 }
3441 
3442 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3443   assert_lock_strong(lock());
3444   // Allocations and deallocations are in raw_word_size
3445   size_t raw_word_size = get_allocation_word_size(word_size);
3446   // Lazily create a block_freelist
3447   if (block_freelists() == NULL) {
3448     _block_freelists = new BlockFreelist();
3449   }
3450   block_freelists()->return_block(p, raw_word_size);
3451   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
3452 }
3453 
3454 // Adds a chunk to the list of chunks in use.
3455 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3456 
3457   assert_lock_strong(_lock);
3458   assert(new_chunk != NULL, "Should not be NULL");
3459   assert(new_chunk->next() == NULL, "Should not be on a list");
3460 
3461   new_chunk->reset_empty();
3462 
3463   // Find the correct list and set the current
3464   // chunk for that list.
3465   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3466 
3467   if (make_current) {
3468     // If we are to make the chunk current, retire the old current chunk and replace
3469     // it with the new chunk.
3470     retire_current_chunk();
3471     set_current_chunk(new_chunk);
3472   }
3473 
3474   // Add the new chunk at the head of its respective chunk list.
3475   new_chunk->set_next(chunks_in_use(index));
3476   set_chunks_in_use(index, new_chunk);
3477 
3478   // Adjust counters.
3479   account_for_new_chunk(new_chunk);
3480 
3481   assert(new_chunk->is_empty(), "Not ready for reuse");
3482   Log(gc, metaspace, freelist) log;
3483   if (log.is_trace()) {
3484     log.trace("SpaceManager::added chunk: ");
3485     ResourceMark rm;
3486     LogStream ls(log.trace());
3487     new_chunk->print_on(&ls);
3488     chunk_manager()->locked_print_free_chunks(&ls);
3489     ls.cr(); // ~LogStream does not autoflush.
3490   }
3491 }
3492 
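     // Retires the current chunk: if the space remaining in the current chunk is large
     // enough to be tracked by the block free list, it is "allocated" from the chunk and
     // immediately handed to the block free list via deallocate(), so it can still serve
     // future small allocations instead of being wasted.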
3493 void SpaceManager::retire_current_chunk() {
3494   if (current_chunk() != NULL) {
3495     size_t remaining_words = current_chunk()->free_word_size();
3496     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3497       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3498       deallocate(ptr, remaining_words);
3499       account_for_allocation(remaining_words);
3500     }
3501   }
3502 }
3503 
3504 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3505   // Get a chunk from the chunk freelist
3506   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3507 
3508   if (next == NULL) {
3509     next = vs_list()->get_new_chunk(chunk_word_size,
3510                                     medium_chunk_bunch());
3511   }
3512 
3513   Log(gc, metaspace, alloc) log;
3514   if (log.is_debug() && next != NULL &&
3515       SpaceManager::is_humongous(next->word_size())) {
3516     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3517   }
3518 
3519   return next;
3520 }
3521 
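     // Allocates word_size words of metadata. First tries the block free list of
     // previously deallocated blocks (once it has grown past allocation_from_dictionary_limit),
     // then falls back to allocate_work(), which uses the current chunk and grows the
     // metaspace if needed.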
3522 MetaWord* SpaceManager::allocate(size_t word_size) {
3523   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3524   size_t raw_word_size = get_allocation_word_size(word_size);
3525   BlockFreelist* fl =  block_freelists();
3526   MetaWord* p = NULL;
3527 
3528   DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());
3529 
3530   // Allocation from the dictionary is expensive in the sense that
3531   // the dictionary has to be searched for a size.  Don't allocate
3532   // from the dictionary until it starts to get fat.  Is this
3533   // a reasonable policy?  Maybe an skinny dictionary is fast enough
3534   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3535   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3536     p = fl->get_block(raw_word_size);
3537     if (p != NULL) {
3538       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
3539     }
3540   }
3541   if (p == NULL) {
3542     p = allocate_work(raw_word_size);
3543   }
3544 
3545   return p;
3546 }
3547 
3548 // Returns the address of the space allocated for "word_size".
3549 // This method does not know about blocks (Metablocks).
3550 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3551   assert_lock_strong(lock());
3552 #ifdef ASSERT
3553   if (Metadebug::test_metadata_failure()) {
3554     return NULL;
3555   }
3556 #endif
3557   // Is there space in the current chunk?
3558   MetaWord* result = NULL;
3559 
3560   if (current_chunk() != NULL) {
3561     result = current_chunk()->allocate(word_size);
3562   }
3563 
3564   if (result == NULL) {
3565     result = grow_and_allocate(word_size);
3566   }
3567 
3568   if (result != NULL) {
3569     account_for_allocation(word_size);
3570     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3571            "Head of the list is being allocated");
3572   }
3573 
3574   return result;
3575 }
3576 
3577 void SpaceManager::verify() {
3578   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3579     Metachunk* curr = chunks_in_use(i);
3580     while (curr != NULL) {
3581       DEBUG_ONLY(do_verify_chunk(curr);)
3582       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3583       curr = curr->next();
3584     }
3585   }
3586 }
3587 
3588 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3589   assert(is_humongous(chunk->word_size()) ||
3590          chunk->word_size() == medium_chunk_size() ||
3591          chunk->word_size() == small_chunk_size() ||
3592          chunk->word_size() == specialized_chunk_size(),
3593          "Chunk size is wrong");
3594   return;
3595 }
3596 
3597 void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
3598   assert_lock_strong(lock());
3599   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3600     UsedChunksStatistics& chunk_stat = out->chunk_stats(i);
3601     Metachunk* chunk = chunks_in_use(i);
3602     while (chunk != NULL) {
3603       chunk_stat.add_num(1);
3604       chunk_stat.add_cap(chunk->word_size());
3605       chunk_stat.add_overhead(Metachunk::overhead());
3606       chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
3607       if (chunk != current_chunk()) {
3608         chunk_stat.add_waste(chunk->free_word_size());
3609       } else {
3610         chunk_stat.add_free(chunk->free_word_size());
3611       }
3612       chunk = chunk->next();
3613     }
3614   }
3615   if (block_freelists() != NULL) {
3616     out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
3617   }
3618 }
3619 
3620 void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
3621   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3622   add_to_statistics_locked(out);
3623 }
3624 
3625 #ifdef ASSERT
3626 void SpaceManager::verify_metrics_locked() const {
3627   assert_lock_strong(lock());
3628 
3629   SpaceManagerStatistics stat;
3630   add_to_statistics_locked(&stat);
3631 
3632   UsedChunksStatistics chunk_stats = stat.totals();
3633 
3634   DEBUG_ONLY(chunk_stats.check_sanity());
3635 
3636   assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
3637   assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
3638   assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
3639 }
3640 
3641 void SpaceManager::verify_metrics() const {
3642   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3643   verify_metrics_locked();
3644 }
3645 #endif // ASSERT
3646 
3647 
3648 
3649 // MetaspaceUtils
3650 size_t MetaspaceUtils::_capacity_words [Metaspace::MetadataTypeCount] = {0, 0};
3651 size_t MetaspaceUtils::_overhead_words [Metaspace::MetadataTypeCount] = {0, 0};
3652 volatile size_t MetaspaceUtils::_used_words [Metaspace::MetadataTypeCount] = {0, 0};
3653 
3654 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
3655 // output will be the accumulated values for all live metaspaces.
3656 // Note: method does not do any locking.
3657 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
3658   out->reset();
3659   ClassLoaderDataGraphMetaspaceIterator iter;
3660   while (iter.repeat()) {
3661     ClassLoaderMetaspace* msp = iter.get_next();
3662     if (msp != NULL) {
3663       msp->add_to_statistics(out);
3664     }
3665   }
3666 }
3667 
3668 size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
3669   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3670   return list == NULL ? 0 : list->free_bytes();
3671 }
3672 
3673 size_t MetaspaceUtils::free_in_vs_bytes() {
3674   return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
3675 }
3676 
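     // Helpers for maintaining the global counters above. Capacity and overhead only
     // change under MetaspaceExpand_lock, so plain (non-atomic) updates suffice; the
     // used counter changes on the per-loader allocation path without that lock and is
     // therefore updated atomically.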
3677 static void inc_stat_nonatomically(size_t* pstat, size_t words) {
3678   assert_lock_strong(MetaspaceExpand_lock);
3679   (*pstat) += words;
3680 }
3681 
3682 static void dec_stat_nonatomically(size_t* pstat, size_t words) {
3683   assert_lock_strong(MetaspaceExpand_lock);
3684   const size_t size_now = *pstat;
3685   assert(size_now >= words, "About to decrement counter below zero "
3686          "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
3687          size_now, words);
3688   *pstat = size_now - words;
3689 }
3690 
3691 static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
3692   Atomic::add(words, pstat);
3693 }
3694 
3695 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
3696   const size_t size_now = *pstat;
3697   assert(size_now >= words, "About to decrement counter below zero "
3698          "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
3699          size_now, words);
3700   Atomic::sub(words, pstat);
3701 }
3702 
3703 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3704   dec_stat_nonatomically(&_capacity_words[mdtype], words);
3705 }
3706 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3707   inc_stat_nonatomically(&_capacity_words[mdtype], words);
3708 }
3709 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3710   dec_stat_atomically(&_used_words[mdtype], words);
3711 }
3712 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3713   inc_stat_atomically(&_used_words[mdtype], words);
3714 }
3715 void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
3716   dec_stat_nonatomically(&_overhead_words[mdtype], words);
3717 }
3718 void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
3719   inc_stat_nonatomically(&_overhead_words[mdtype], words);
3720 }
3721 
3722 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3723   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3724   return list == NULL ? 0 : list->reserved_bytes();
3725 }
3726 
3727 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3728   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3729   return list == NULL ? 0 : list->committed_bytes();
3730 }
3731 
3732 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3733 
3734 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3735   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3736   if (chunk_manager == NULL) {
3737     return 0;
3738   }
3739   chunk_manager->slow_verify();
3740   return chunk_manager->free_chunks_total_words();
3741 }
3742 
3743 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3744   return free_chunks_total_words(mdtype) * BytesPerWord;
3745 }
3746 
3747 size_t MetaspaceUtils::free_chunks_total_words() {
3748   return free_chunks_total_words(Metaspace::ClassType) +
3749          free_chunks_total_words(Metaspace::NonClassType);
3750 }
3751 
3752 size_t MetaspaceUtils::free_chunks_total_bytes() {
3753   return free_chunks_total_words() * BytesPerWord;
3754 }
3755 
3756 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3757   return Metaspace::get_chunk_manager(mdtype) != NULL;
3758 }
3759 
3760 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3761   if (!has_chunk_free_list(mdtype)) {
3762     return MetaspaceChunkFreeListSummary();
3763   }
3764 
3765   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3766   return cm->chunk_free_list_summary();
3767 }
3768 
3769 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3770   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3771                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3772 }
3773 
3774 void MetaspaceUtils::print_on(outputStream* out) {
3775   Metaspace::MetadataType nct = Metaspace::NonClassType;
3776 
3777   out->print_cr(" Metaspace       "
3778                 "used "      SIZE_FORMAT "K, "
3779                 "capacity "  SIZE_FORMAT "K, "
3780                 "committed " SIZE_FORMAT "K, "
3781                 "reserved "  SIZE_FORMAT "K",
3782                 used_bytes()/K,
3783                 capacity_bytes()/K,
3784                 committed_bytes()/K,
3785                 reserved_bytes()/K);
3786 
3787   if (Metaspace::using_class_space()) {
3788     Metaspace::MetadataType ct = Metaspace::ClassType;
3789     out->print_cr("  class space    "
3790                   "used "      SIZE_FORMAT "K, "
3791                   "capacity "  SIZE_FORMAT "K, "
3792                   "committed " SIZE_FORMAT "K, "
3793                   "reserved "  SIZE_FORMAT "K",
3794                   used_bytes(ct)/K,
3795                   capacity_bytes(ct)/K,
3796                   committed_bytes(ct)/K,
3797                   reserved_bytes(ct)/K);
3798   }
3799 }
3800 
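     // CLDClosure which walks all ClassLoaderData, accumulating metaspace statistics
     // in total and per space type, and optionally printing one line per class loader
     // (used by MetaspaceUtils::print_report()).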
3801 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
3802 private:
3803   outputStream* const _out;
3804   const size_t        _scale;
3805   const bool          _do_print;
3806   const bool          _break_down_by_chunktype;
3807 
3808 public:
3809 
3810   uintx                           _num_loaders;
3811   ClassLoaderMetaspaceStatistics  _stats_total;
3812 
3813   uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
3814   ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];
3815 
3816 public:
3817   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
3818     : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
3819     , _num_loaders(0)
3820   {
3821     memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
3822   }
3823 
3824   void do_cld(ClassLoaderData* cld) {
3825 
3826     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3827 
3828     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
3829     if (msp == NULL) {
3830       return;
3831     }
3832 
3833     // Collect statistics for this class loader metaspace
3834     ClassLoaderMetaspaceStatistics this_cld_stat;
3835     msp->add_to_statistics(&this_cld_stat);
3836 
3837     // And add it to the running totals
3838     _stats_total.add(this_cld_stat);
3839     _num_loaders ++;
3840     _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
3841     _num_loaders_by_spacetype[msp->space_type()] ++;
3842 
3843     // Optionally, print.
3844     if (_do_print) {
3845 
3846       _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders);
3847 
3848       if (cld->is_anonymous()) {
3849         _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));
3850       } else {
3851         ResourceMark rm;
3852         _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
3853       }
3854 
3855       if (cld->is_unloading()) {
3856         _out->print(" (unloading)");
3857       }
3858 
3859       this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
3860       _out->cr();
3861 
3862     }
3863 
3864   } // do_cld
3865 
3866 };
3867 
3868 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
3869   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3870   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3871   {
3872     if (Metaspace::using_class_space()) {
3873       out->print("  Non-class space:  ");
3874     }
3875     print_scaled_words(out, reserved_nonclass_words, scale, 7);
3876     out->print(" reserved, ");
3877     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
3878     out->print_cr(" committed ");
3879 
3880     if (Metaspace::using_class_space()) {
3881       const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3882       const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3883       out->print("      Class space:  ");
3884       print_scaled_words(out, reserved_class_words, scale, 7);
3885       out->print(" reserved, ");
3886       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
3887       out->print_cr(" committed ");
3888 
3889       const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
3890       const size_t committed_words = committed_nonclass_words + committed_class_words;
3891       out->print("             Both:  ");
3892       print_scaled_words(out, reserved_words, scale, 7);
3893       out->print(" reserved, ");
3894       print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
3895       out->print_cr(" committed ");
3896     }
3897   }
3898 }
3899 
3900 // This will print out a basic metaspace usage report but,
3901 // unlike print_report(), is guaranteed not to lock or to walk the CLDG.
3902 void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
3903 
3904   out->cr();
3905   out->print_cr("Usage:");
3906 
3907   if (Metaspace::using_class_space()) {
3908     out->print("  Non-class:  ");
3909   }
3910 
3911   // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
3912   // MetaspaceUtils.
3913   const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
3914   const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
3915   const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
3916   const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
3917 
3918   print_scaled_words(out, cap_nc, scale, 5);
3919   out->print(" capacity, ");
3920   print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
3921   out->print(" used, ");
3922   print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
3923   out->print(" free+waste, ");
3924   print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
3925   out->print(" overhead. ");
3926   out->cr();
3927 
3928   if (Metaspace::using_class_space()) {
3929     const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
3930     const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
3931     const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
3932     const size_t free_and_waste_c = cap_c - overhead_c - used_c;
3933     out->print("      Class:  ");
3934     print_scaled_words(out, cap_c, scale, 5);
3935     out->print(" capacity, ");
3936     print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
3937     out->print(" used, ");
3938     print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
3939     out->print(" free+waste, ");
3940     print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
3941     out->print(" overhead. ");
3942     out->cr();
3943 
3944     out->print("       Both:  ");
3945     const size_t cap = cap_nc + cap_c;
3946 
3947     print_scaled_words(out, cap, scale, 5);
3948     out->print(" capacity, ");
3949     print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
3950     out->print(" used, ");
3951     print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
3952     out->print(" free+waste, ");
3953     print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
3954     out->print(" overhead. ");
3955     out->cr();
3956   }
3957 
3958   out->cr();
3959   out->print_cr("Virtual space:");
3960 
3961   print_vs(out, scale);
3962 
3963   out->cr();
3964   out->print_cr("Chunk freelists:");
3965 
3966   if (Metaspace::using_class_space()) {
3967     out->print("   Non-Class:  ");
3968   }
3969   out->print_human_readable_size(Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3970   out->cr();
3971   if (Metaspace::using_class_space()) {
3972     out->print("       Class:  ");
3973     out->print_human_readable_size(Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
3974     out->cr();
3975     out->print("        Both:  ");
3976     out->print_human_readable_size(Metaspace::chunk_manager_class()->free_chunks_total_words() +
3977                        Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3978     out->cr();
3979   }
3980   out->cr();
3981 
3982 }
3983 
3984 void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
3985 
3986   const bool print_loaders = (flags & rf_show_loaders) > 0;
3987   const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
3988   const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
3989 
3990   // Some report options require walking the class loader data graph.
3991   PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_by_chunktype);
3992   if (print_loaders) {
3993     out->cr();
3994     out->print_cr("Usage per loader:");
3995     out->cr();
3996   }
3997 
3998   ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print
3999 
4000   // Print totals, broken up by space type.
4001   if (print_by_spacetype) {
4002     out->cr();
4003     out->print_cr("Usage per space type:");
4004     out->cr();
4005     for (int space_type = (int)Metaspace::ZeroMetaspaceType;
4006          space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
4007     {
4008       uintx num = cl._num_loaders_by_spacetype[space_type];
4009       out->print("%s (" UINTX_FORMAT " loader%s)%c",
4010         space_type_name((Metaspace::MetaspaceType)space_type),
4011         num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
4012       if (num > 0) {
4013         cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
4014       }
4015       out->cr();
4016     }
4017   }
4018 
4019   // Print totals for in-use data:
4020   out->cr();
4021   out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
4022       cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));
4023 
4024   cl._stats_total.print_on(out, scale, print_by_chunktype);
4025 
4026   // -- Print Virtual space.
4027   out->cr();
4028   out->print_cr("Virtual space:");
4029 
4030   print_vs(out, scale);
4031 
4032   // -- Print VirtualSpaceList details.
4033   if ((flags & rf_show_vslist) > 0) {
4034     out->cr();
4035     out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
4036 
4037     if (Metaspace::using_class_space()) {
4038       out->print_cr("   Non-Class:");
4039     }
4040     Metaspace::space_list()->print_on(out, scale);
4041     if (Metaspace::using_class_space()) {
4042       out->print_cr("       Class:");
4043       Metaspace::class_space_list()->print_on(out, scale);
4044     }
4045   }
4046   out->cr();
4047 
4048   // -- Print VirtualSpaceList map.
4049   if ((flags & rf_show_vsmap) > 0) {
4050     out->cr();
4051     out->print_cr("Virtual space map:");
4052 
4053     if (Metaspace::using_class_space()) {
4054       out->print_cr("   Non-Class:");
4055     }
4056     Metaspace::space_list()->print_map(out);
4057     if (Metaspace::using_class_space()) {
4058       out->print_cr("       Class:");
4059       Metaspace::class_space_list()->print_map(out);
4060     }
4061   }
4062   out->cr();
4063 
4064   // -- Print Freelists (ChunkManager) details
4065   out->cr();
4066   out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
4067 
4068   ChunkManagerStatistics non_class_cm_stat;
4069   Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
4070 
4071   if (Metaspace::using_class_space()) {
4072     out->print_cr("   Non-Class:");
4073   }
4074   non_class_cm_stat.print_on(out, scale);
4075 
4076   if (Metaspace::using_class_space()) {
4077     ChunkManagerStatistics class_cm_stat;
4078     Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
4079     out->print_cr("       Class:");
4080     class_cm_stat.print_on(out, scale);
4081   }
4082 
4083   // As a convenience, print a summary of common waste.
4084   out->cr();
4085   out->print("Waste: ");
  // For all waste numbers, print percentages of a total; as that total, use the
  // size of memory committed for metaspace.
4087   const size_t committed_words = committed_bytes() / BytesPerWord;
4088 
4089   out->print("(Percentage values refer to total committed size (");
4090   print_scaled_words(out, committed_words, scale);
4091   out->print_cr(").");
4092 
4093   // Print waste for in-use chunks.
4094   UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
4095   UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
4096   UsedChunksStatistics ucs_all;
4097   ucs_all.add(ucs_nonclass);
4098   ucs_all.add(ucs_class);
4099   out->print("        Waste in chunks in use: ");
4100   print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
4101   out->cr();
4102   out->print("         Free in chunks in use: ");
4103   print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
4104   out->cr();
4105 
4106   // Print waste in free chunks.
4107   const size_t total_capacity_in_free_chunks =
4108       Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
4109      (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
4110   out->print("                In free chunks: ");
4111   print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
4112   out->cr();
4113 
4114   // Print waste in deallocated blocks.
4115   const uintx free_blocks_num =
4116       cl._stats_total.nonclass_sm_stats().free_blocks_num() +
4117       cl._stats_total.class_sm_stats().free_blocks_num();
4118   const size_t free_blocks_cap_words =
4119       cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
4120       cl._stats_total.class_sm_stats().free_blocks_cap_words();
4121   out->print("Deallocated from chunks in use: " UINTX_FORMAT " blocks, total size ", free_blocks_num);
4122   print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
4123   out->cr();
4124 
4125   // Print internal statistics
4126 #ifdef ASSERT
4127   out->cr();
4128   out->cr();
4129   out->print_cr("Internal statistics:");
4130   out->cr();
4131   out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
4132   out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
4133   out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
4134   out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
4135   out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
4136   out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
4137   out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
4138   out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
4139   out->cr();
4140 #endif
4141 
4142   // Print some interesting settings
4143   out->cr();
4144   out->cr();
4145   out->print("MaxMetaspaceSize: ");
4146   out->print_human_readable_size(MaxMetaspaceSize, scale);
4147   out->cr();
4148   out->print("InitialBootClassLoaderMetaspaceSize: ");
4149   out->print_human_readable_size(InitialBootClassLoaderMetaspaceSize, scale);
4150   out->cr();
4151 
4152   out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
4153   out->cr();
4154   if (Metaspace::using_class_space()) {
4155     out->print("CompressedClassSpaceSize: ");
4156     out->print_human_readable_size(CompressedClassSpaceSize, scale);
4157   }
4158 
4159   out->cr();
4160   out->cr();
4161 
4162 } // MetaspaceUtils::print_report()
4163 
4164 // Prints an ASCII representation of the given space.
4165 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4166   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  const bool for_class = (mdtype == Metaspace::ClassType);
4168   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4169   if (vsl != NULL) {
4170     if (for_class) {
4171       if (!Metaspace::using_class_space()) {
4172         out->print_cr("No Class Space.");
4173         return;
4174       }
4175       out->print_raw("---- Metaspace Map (Class Space) ----");
4176     } else {
4177       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4178     }
4179     // Print legend:
4180     out->cr();
4181     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4182     out->cr();
    vsl->print_map(out);
4185     out->cr();
4186   }
4187 }
4188 
4189 void MetaspaceUtils::verify_free_chunks() {
4190   Metaspace::chunk_manager_metadata()->verify();
4191   if (Metaspace::using_class_space()) {
4192     Metaspace::chunk_manager_class()->verify();
4193   }
4194 }
4195 
4196 void MetaspaceUtils::verify_metrics() {
4197 #ifdef ASSERT
4198   // Please note: there are time windows where the internal counters are out of sync with
4199   // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
4200   // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
4201   // not be counted when iterating the CLDG. So be careful when you call this method.
4202   ClassLoaderMetaspaceStatistics total_stat;
4203   collect_statistics(&total_stat);
4204   UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
4205   UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
4206 
4207   bool mismatch = false;
4208   for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
4209     Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
4210     UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
4211     if (capacity_words(mdtype) != chunk_stat.cap() ||
4212         used_words(mdtype) != chunk_stat.used() ||
4213         overhead_words(mdtype) != chunk_stat.overhead()) {
4214       mismatch = true;
4215       tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
4216       tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4217                     capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
4218       tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4219                     chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
4220       tty->flush();
4221     }
4222   }
4223   assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
4224 #endif
4225 }
4226 
4227 
4228 // Metaspace methods
4229 
4230 size_t Metaspace::_first_chunk_word_size = 0;
4231 size_t Metaspace::_first_class_chunk_word_size = 0;
4232 
4233 size_t Metaspace::_commit_alignment = 0;
4234 size_t Metaspace::_reserve_alignment = 0;
4235 
4236 VirtualSpaceList* Metaspace::_space_list = NULL;
4237 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4238 
4239 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4240 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4241 
4242 #define VIRTUALSPACEMULTIPLIER 2
4243 
4244 #ifdef _LP64
4245 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4246 
4247 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4248   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4249   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4250   // narrow_klass_base is the lower of the metaspace base and the cds base
4251   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4252   // between the lower base and higher address.
4253   address lower_base;
4254   address higher_address;
4255 #if INCLUDE_CDS
4256   if (UseSharedSpaces) {
4257     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4258                           (address)(metaspace_base + compressed_class_space_size()));
4259     lower_base = MIN2(metaspace_base, cds_base);
4260   } else
4261 #endif
4262   {
4263     higher_address = metaspace_base + compressed_class_space_size();
4264     lower_base = metaspace_base;
4265 
4266     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4267     // If compressed class space fits in lower 32G, we don't need a base.
4268     if (higher_address <= (address)klass_encoding_max) {
4269       lower_base = 0; // Effectively lower base is zero.
4270     }
4271   }
4272 
4273   Universe::set_narrow_klass_base(lower_base);
4274 
  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how the dump time narrow_klass_shift is set. Although CDS can also work
  // with zero-shift mode, it uses LogKlassAlignmentInBytes for the klass shift
  // to stay consistent with AOT, so archived java heap objects can be used at
  // the same time as AOT code.
4281   if (!UseSharedSpaces
4282       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4283     Universe::set_narrow_klass_shift(0);
4284   } else {
4285     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4286   }
4287   AOTLoader::set_narrow_klass_shift();
4288 }
4289 
4290 #if INCLUDE_CDS
4291 // Return TRUE if the specified metaspace_base and cds_base are close enough
4292 // to work with compressed klass pointers.
4293 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4294   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4295   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4296   address lower_base = MIN2((address)metaspace_base, cds_base);
4297   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4298                                 (address)(metaspace_base + compressed_class_space_size()));
4299   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4300 }
4301 #endif
4302 
4303 // Try to allocate the metaspace at the requested addr.
4304 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4305   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4306   assert(using_class_space(), "called improperly");
4307   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4308   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4309          "Metaspace size is too big");
4310   assert_is_aligned(requested_addr, _reserve_alignment);
4311   assert_is_aligned(cds_base, _reserve_alignment);
4312   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4313 
4314   // Don't use large pages for the class space.
4315   bool large_pages = false;
4316 
4317 #if !(defined(AARCH64) || defined(AIX))
4318   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4319                                              _reserve_alignment,
4320                                              large_pages,
4321                                              requested_addr);
#else // AARCH64 || AIX
4323   ReservedSpace metaspace_rs;
4324 
4325   // Our compressed klass pointers may fit nicely into the lower 32
4326   // bits.
4327   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4328     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4329                                  _reserve_alignment,
4330                                  large_pages,
4331                                  requested_addr);
4332   }
4333 
4334   if (! metaspace_rs.is_reserved()) {
4335     // Aarch64: Try to align metaspace so that we can decode a compressed
4336     // klass with a single MOVK instruction.  We can do this iff the
4337     // compressed class base is a multiple of 4G.
4338     // Aix: Search for a place where we can find memory. If we need to load
4339     // the base, 4G alignment is helpful, too.
4340     size_t increment = AARCH64_ONLY(4*)G;
4341     for (char *a = align_up(requested_addr, increment);
4342          a < (char*)(1024*G);
4343          a += increment) {
4344       if (a == (char *)(32*G)) {
4345         // Go faster from here on. Zero-based is no longer possible.
4346         increment = 4*G;
4347       }
4348 
4349 #if INCLUDE_CDS
4350       if (UseSharedSpaces
4351           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4352         // We failed to find an aligned base that will reach.  Fall
4353         // back to using our requested addr.
4354         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4355                                      _reserve_alignment,
4356                                      large_pages,
4357                                      requested_addr);
4358         break;
4359       }
4360 #endif
4361 
4362       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4363                                    _reserve_alignment,
4364                                    large_pages,
4365                                    a);
4366       if (metaspace_rs.is_reserved())
4367         break;
4368     }
4369   }
4370 
#endif // AARCH64 || AIX
4372 
4373   if (!metaspace_rs.is_reserved()) {
4374 #if INCLUDE_CDS
4375     if (UseSharedSpaces) {
4376       size_t increment = align_up(1*G, _reserve_alignment);
4377 
4378       // Keep trying to allocate the metaspace, increasing the requested_addr
4379       // by 1GB each time, until we reach an address that will no longer allow
4380       // use of CDS with compressed klass pointers.
4381       char *addr = requested_addr;
4382       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4383              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4384         addr = addr + increment;
4385         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4386                                      _reserve_alignment, large_pages, addr);
4387       }
4388     }
4389 #endif
4390     // If no successful allocation then try to allocate the space anywhere.  If
4391     // that fails then OOM doom.  At this point we cannot try allocating the
4392     // metaspace as if UseCompressedClassPointers is off because too much
4393     // initialization has happened that depends on UseCompressedClassPointers.
4394     // So, UseCompressedClassPointers cannot be turned off at this point.
4395     if (!metaspace_rs.is_reserved()) {
4396       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4397                                    _reserve_alignment, large_pages);
4398       if (!metaspace_rs.is_reserved()) {
4399         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4400                                               compressed_class_space_size()));
4401       }
4402     }
4403   }
4404 
4405   // If we got here then the metaspace got allocated.
4406   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4407 
4408 #if INCLUDE_CDS
4409   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4410   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4411     FileMapInfo::stop_sharing_and_unmap(
4412         "Could not allocate metaspace at a compatible address");
4413   }
4414 #endif
4415   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4416                                   UseSharedSpaces ? (address)cds_base : 0);
4417 
4418   initialize_class_space(metaspace_rs);
4419 
4420   LogTarget(Trace, gc, metaspace) lt;
4421   if (lt.is_enabled()) {
4422     ResourceMark rm;
4423     LogStream ls(lt);
4424     print_compressed_class_space(&ls, requested_addr);
4425     ls.cr(); // ~LogStream does not autoflush.
4426   }
4427 }
4428 
4429 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4430   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4431                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4432   if (_class_space_list != NULL) {
4433     address base = (address)_class_space_list->current_virtual_space()->bottom();
4434     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4435                  compressed_class_space_size(), p2i(base));
4436     if (requested_addr != 0) {
4437       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4438     }
4439     st->cr();
4440   }
4441 }
4442 
4443 // For UseCompressedClassPointers the class space is reserved above the top of
4444 // the Java heap.  The argument passed in is at the base of the compressed space.
4445 void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
4447   assert(rs.size() >= CompressedClassSpaceSize,
4448          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4449   assert(using_class_space(), "Must be using class space");
4450   _class_space_list = new VirtualSpaceList(rs);
4451   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4452 
4453   if (!_class_space_list->initialization_succeeded()) {
4454     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4455   }
4456 }
4457 
4458 #endif
4459 
4460 void Metaspace::ergo_initialize() {
4461   if (DumpSharedSpaces) {
4462     // Using large pages when dumping the shared archive is currently not implemented.
4463     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4464   }
4465 
4466   size_t page_size = os::vm_page_size();
4467   if (UseLargePages && UseLargePagesInMetaspace) {
4468     page_size = os::large_page_size();
4469   }
4470 
4471   _commit_alignment  = page_size;
4472   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4473 
  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
  // lose the information whether MaxMetaspaceSize was set on the command line or not.
4476   // This information is needed later to conform to the specification of the
4477   // java.lang.management.MemoryUsage API.
4478   //
4479   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4480   // globals.hpp to the aligned value, but this is not possible, since the
4481   // alignment depends on other flags being parsed.
4482   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4483 
4484   if (MetaspaceSize > MaxMetaspaceSize) {
4485     MetaspaceSize = MaxMetaspaceSize;
4486   }
4487 
4488   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4489 
4490   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4491 
4492   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4493   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4494 
4495   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4496 
4497   // Initial virtual space size will be calculated at global_initialize()
4498   size_t min_metaspace_sz =
4499       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4500   if (UseCompressedClassPointers) {
4501     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4502       if (min_metaspace_sz >= MaxMetaspaceSize) {
4503         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4504       } else {
4505         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4506                       MaxMetaspaceSize - min_metaspace_sz);
4507       }
4508     }
4509   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4510     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4511                   min_metaspace_sz);
4512   }
4513 
4514   set_compressed_class_space_size(CompressedClassSpaceSize);
4515 }
4516 
4517 void Metaspace::global_initialize() {
4518   MetaspaceGC::initialize();
4519 
4520 #if INCLUDE_CDS
4521   if (DumpSharedSpaces) {
4522     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4523   } else if (UseSharedSpaces) {
    // If any of the archived spaces fails to map, UseSharedSpaces
4525     // is reset to false. Fall through to the
4526     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4527     // metaspace.
4528     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4529   }
4530 
4531   if (!DumpSharedSpaces && !UseSharedSpaces)
4532 #endif // INCLUDE_CDS
4533   {
4534 #ifdef _LP64
4535     if (using_class_space()) {
4536       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4537       allocate_metaspace_compressed_klass_ptrs(base, 0);
4538     }
4539 #endif // _LP64
4540   }
4541 
4542   // Initialize these before initializing the VirtualSpaceList
4543   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4544   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4545   // Make the first class chunk bigger than a medium chunk so it's not put
4546   // on the medium chunk list.   The next chunk will be small and progress
  // from there.  This size was calculated by running -version.
4548   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4549                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4550   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4551   // Arbitrarily set the initial virtual space to a multiple
4552   // of the boot class loader size.
4553   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4554   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4555 
4556   // Initialize the list of virtual spaces.
4557   _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4559 
4560   if (!_space_list->initialization_succeeded()) {
4561     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4562   }
4563 
4564   _tracer = new MetaspaceTracer();
4565 }
4566 
4567 void Metaspace::post_initialize() {
4568   MetaspaceGC::post_initialize();
4569 }
4570 
4571 void Metaspace::verify_global_initialization() {
4572   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4573   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4574 
4575   if (using_class_space()) {
4576     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4577     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4578   }
4579 }
4580 
4581 size_t Metaspace::align_word_size_up(size_t word_size) {
4582   size_t byte_size = word_size * wordSize;
4583   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4584 }
4585 
4586 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4587                               MetaspaceObj::Type type, TRAPS) {
4588   assert(!_frozen, "sanity");
4589   if (HAS_PENDING_EXCEPTION) {
4590     assert(false, "Should not allocate with exception pending");
4591     return NULL;  // caller does a CHECK_NULL too
4592   }
4593 
4594   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4595         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4596 
4597   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4598 
4599   // Try to allocate metadata.
4600   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4601 
4602   if (result == NULL) {
4603     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4604 
4605     // Allocation failed.
4606     if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
4607       // Only start a GC if the bootstrapping has completed.
4608       // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
4609       // the VM thread.
4610 
4611       // Try to clean out some memory and retry.
4612       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4613     }
4614   }
4615 
4616   if (result == NULL) {
4617     if (DumpSharedSpaces) {
4618       // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
4619       // We should abort to avoid generating a potentially bad archive.
4620       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4621           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4622       tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
4623       vm_exit(1);
4624     }
4625     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4626   }
4627 
4628   // Zero initialize.
4629   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4630 
4631   return result;
4632 }
4633 
4634 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4635   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4636 
4637   // If result is still null, we are out of memory.
4638   Log(gc, metaspace, freelist) log;
4639   if (log.is_info()) {
4640     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4641              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4642     ResourceMark rm;
4643     if (log.is_debug()) {
4644       if (loader_data->metaspace_or_null() != NULL) {
4645         LogStream ls(log.debug());
4646         loader_data->print_value_on(&ls);
4647         ls.cr(); // ~LogStream does not autoflush.
4648       }
4649     }
4650     LogStream ls(log.info());
4651     // In case of an OOM, log out a short but still useful report.
4652     MetaspaceUtils::print_basic_report(&ls, 0);
4653     ls.cr(); // ~LogStream does not autoflush.
4654   }
4655 
4656   bool out_of_compressed_class_space = false;
4657   if (is_class_space_allocation(mdtype)) {
4658     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4659     out_of_compressed_class_space =
4660       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4661       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4662       CompressedClassSpaceSize;
4663   }
4664 
4665   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4666   const char* space_string = out_of_compressed_class_space ?
4667     "Compressed class space" : "Metaspace";
4668 
4669   report_java_out_of_memory(space_string);
4670 
4671   if (JvmtiExport::should_post_resource_exhausted()) {
4672     JvmtiExport::post_resource_exhausted(
4673         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4674         space_string);
4675   }
4676 
4677   if (!is_init_completed()) {
4678     vm_exit_during_initialization("OutOfMemoryError", space_string);
4679   }
4680 
4681   if (out_of_compressed_class_space) {
4682     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4683   } else {
4684     THROW_OOP(Universe::out_of_memory_error_metaspace());
4685   }
4686 }
4687 
4688 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4689   switch (mdtype) {
4690     case Metaspace::ClassType: return "Class";
4691     case Metaspace::NonClassType: return "Metadata";
4692     default:
4693       assert(false, "Got bad mdtype: %d", (int) mdtype);
4694       return NULL;
4695   }
4696 }
4697 
4698 void Metaspace::purge(MetadataType mdtype) {
4699   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4700 }
4701 
4702 void Metaspace::purge() {
4703   MutexLockerEx cl(MetaspaceExpand_lock,
4704                    Mutex::_no_safepoint_check_flag);
4705   purge(NonClassType);
4706   if (using_class_space()) {
4707     purge(ClassType);
4708   }
4709 }
4710 
4711 bool Metaspace::contains(const void* ptr) {
4712   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4713     return true;
4714   }
4715   return contains_non_shared(ptr);
4716 }
4717 
4718 bool Metaspace::contains_non_shared(const void* ptr) {
4719   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4720      return true;
4721   }
4722 
4723   return get_space_list(NonClassType)->contains(ptr);
4724 }
4725 
4726 // ClassLoaderMetaspace
4727 
4728 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
4729   : _lock(lock)
4730   , _space_type(type)
4731   , _vsm(NULL)
4732   , _class_vsm(NULL)
4733 {
4734   initialize(lock, type);
4735 }
4736 
4737 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4738   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
4739   delete _vsm;
4740   if (Metaspace::using_class_space()) {
4741     delete _class_vsm;
4742   }
4743 }
4744 
4745 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4746   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4747   if (chunk != NULL) {
4748     // Add to this manager's list of chunks in use and make it the current_chunk().
4749     get_space_manager(mdtype)->add_chunk(chunk, true);
4750   }
4751 }
4752 
4753 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4754   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4755 
4756   // Get a chunk from the chunk freelist
4757   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4758 
4759   if (chunk == NULL) {
4760     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4761                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4762   }
4763 
4764   return chunk;
4765 }
4766 
4767 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4768   Metaspace::verify_global_initialization();
4769 
4770   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
4771 
4772   // Allocate SpaceManager for metadata objects.
4773   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4774 
4775   if (Metaspace::using_class_space()) {
4776     // Allocate SpaceManager for classes.
4777     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4778   }
4779 
4780   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4781 
4782   // Allocate chunk for metadata objects
4783   initialize_first_chunk(type, Metaspace::NonClassType);
4784 
4785   // Allocate chunk for class metadata objects
4786   if (Metaspace::using_class_space()) {
4787     initialize_first_chunk(type, Metaspace::ClassType);
4788   }
4789 }
4790 
4791 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4792   Metaspace::assert_not_frozen();
4793 
4794   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
4795 
4796   // Don't use class_vsm() unless UseCompressedClassPointers is true.
  if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
4802 }
4803 
4804 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4805   Metaspace::assert_not_frozen();
4806   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4807   assert(delta_bytes > 0, "Must be");
4808 
4809   size_t before = 0;
4810   size_t after = 0;
4811   MetaWord* res;
4812   bool incremented;
4813 
4814   // Each thread increments the HWM at most once. Even if the thread fails to increment
4815   // the HWM, an allocation is still attempted. This is because another thread must then
4816   // have incremented the HWM and therefore the allocation might still succeed.
4817   do {
4818     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4819     res = allocate(word_size, mdtype);
4820   } while (!incremented && res == NULL);
4821 
4822   if (incremented) {
4823     Metaspace::tracer()->report_gc_threshold(before, after,
4824                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4825     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4826   }
4827 
4828   return res;
4829 }
4830 
4831 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4832   return (vsm()->used_words() +
4833       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
4834 }
4835 
4836 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4837   return (vsm()->capacity_words() +
4838       (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
4839 }
4840 
4841 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4842   Metaspace::assert_not_frozen();
4843   assert(!SafepointSynchronize::is_at_safepoint()
4844          || Thread::current()->is_VM_thread(), "should be the VM thread");
4845 
4846   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
4847 
4848   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4849 
4850   if (is_class && Metaspace::using_class_space()) {
4851     class_vsm()->deallocate(ptr, word_size);
4852   } else {
4853     vsm()->deallocate(ptr, word_size);
4854   }
4855 }
4856 
4857 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4858   assert(Metaspace::using_class_space(), "Has to use class space");
4859   return class_vsm()->calc_chunk_size(word_size);
4860 }
4861 
4862 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4863   // Print both class virtual space counts and metaspace.
4864   if (Verbose) {
4865     vsm()->print_on(out);
4866     if (Metaspace::using_class_space()) {
4867       class_vsm()->print_on(out);
4868     }
4869   }
4870 }
4871 
4872 void ClassLoaderMetaspace::verify() {
4873   vsm()->verify();
4874   if (Metaspace::using_class_space()) {
4875     class_vsm()->verify();
4876   }
4877 }
4878 
4879 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
4880   assert_lock_strong(lock());
4881   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
4882   if (Metaspace::using_class_space()) {
4883     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
4884   }
4885 }
4886 
4887 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
4888   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
4889   add_to_statistics_locked(out);
4890 }
4891 
4892 #ifdef ASSERT
4893 static void do_verify_chunk(Metachunk* chunk) {
4894   guarantee(chunk != NULL, "Sanity");
4895   // Verify chunk itself; then verify that it is consistent with the
  // occupancy map of its containing node.
4897   chunk->verify();
4898   VirtualSpaceNode* const vsn = chunk->container();
4899   OccupancyMap* const ocmap = vsn->occupancy_map();
4900   ocmap->verify_for_chunk(chunk);
4901 }
4902 #endif
4903 
4904 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4905   chunk->set_is_tagged_free(!inuse);
4906   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4907   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4908 }
4909 
4910 /////////////// Unit tests ///////////////
4911 
4912 #ifndef PRODUCT
4913 
4914 class TestMetaspaceUtilsTest : AllStatic {
4915  public:
4916   static void test_reserved() {
4917     size_t reserved = MetaspaceUtils::reserved_bytes();
4918 
4919     assert(reserved > 0, "assert");
4920 
4921     size_t committed  = MetaspaceUtils::committed_bytes();
4922     assert(committed <= reserved, "assert");
4923 
4924     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
4925     assert(reserved_metadata > 0, "assert");
4926     assert(reserved_metadata <= reserved, "assert");
4927 
4928     if (UseCompressedClassPointers) {
4929       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
4930       assert(reserved_class > 0, "assert");
4931       assert(reserved_class < reserved, "assert");
4932     }
4933   }
4934 
4935   static void test_committed() {
4936     size_t committed = MetaspaceUtils::committed_bytes();
4937 
4938     assert(committed > 0, "assert");
4939 
4940     size_t reserved  = MetaspaceUtils::reserved_bytes();
4941     assert(committed <= reserved, "assert");
4942 
4943     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
4944     assert(committed_metadata > 0, "assert");
4945     assert(committed_metadata <= committed, "assert");
4946 
4947     if (UseCompressedClassPointers) {
4948       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
4949       assert(committed_class > 0, "assert");
4950       assert(committed_class < committed, "assert");
4951     }
4952   }
4953 
4954   static void test_virtual_space_list_large_chunk() {
4955     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
4956     MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4957     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
4958     // vm_allocation_granularity aligned on Windows.
4959     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
4960     large_size += (os::vm_page_size()/BytesPerWord);
4961     vs_list->get_new_chunk(large_size, 0);
4962   }
4963 
4964   static void test() {
4965     test_reserved();
4966     test_committed();
4967     test_virtual_space_list_large_chunk();
4968   }
4969 };
4970 
4971 void TestMetaspaceUtils_test() {
4972   TestMetaspaceUtilsTest::test();
4973 }
4974 
4975 class TestVirtualSpaceNodeTest {
4976   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
4977                                           size_t& num_small_chunks,
4978                                           size_t& num_specialized_chunks) {
4979     num_medium_chunks = words_left / MediumChunk;
4980     words_left = words_left % MediumChunk;
4981 
4982     num_small_chunks = words_left / SmallChunk;
4983     words_left = words_left % SmallChunk;
4984     // how many specialized chunks can we get?
4985     num_specialized_chunks = words_left / SpecializedChunk;
4986     assert(words_left % SpecializedChunk == 0, "should be nothing left");
4987   }
4988 
4989  public:
4990   static void test() {
4991     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4992     const size_t vsn_test_size_words = MediumChunk  * 4;
4993     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
4994 
    // The chunk sizes must be multiples of each other, or this will fail
4996     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
4997     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
4998 
4999     { // No committed memory in VSN
5000       ChunkManager cm(false);
5001       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5002       vsn.initialize();
5003       vsn.retire(&cm);
5004       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5005     }
5006 
5007     { // All of VSN is committed, half is used by chunks
5008       ChunkManager cm(false);
5009       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5010       vsn.initialize();
5011       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5012       vsn.get_chunk_vs(MediumChunk);
5013       vsn.get_chunk_vs(MediumChunk);
5014       vsn.retire(&cm);
5015       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5016       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5017     }
5018 
5019     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5020     // This doesn't work for systems with vm_page_size >= 16K.
5021     if (page_chunks < MediumChunk) {
5022       // 4 pages of VSN is committed, some is used by chunks
5023       ChunkManager cm(false);
5024       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5025 
5026       vsn.initialize();
5027       vsn.expand_by(page_chunks, page_chunks);
5028       vsn.get_chunk_vs(SmallChunk);
5029       vsn.get_chunk_vs(SpecializedChunk);
5030       vsn.retire(&cm);
5031 
5032       // committed - used = words left to retire
5033       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5034 
5035       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5036       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5037 
5038       assert(num_medium_chunks == 0, "should not get any medium chunks");
5039       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5040       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5041     }
5042 
5043     { // Half of VSN is committed, a humongous chunk is used
5044       ChunkManager cm(false);
5045       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5046       vsn.initialize();
5047       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5048       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5049       vsn.retire(&cm);
5050 
5051       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5052       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5053       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5054 
5055       assert(num_medium_chunks == 0, "should not get any medium chunks");
5056       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5057       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5058     }
5059 
5060   }
5061 
5062 #define assert_is_available_positive(word_size) \
5063   assert(vsn.is_available(word_size), \
5064          #word_size ": " PTR_FORMAT " bytes were not available in " \
5065          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5066          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5067 
5068 #define assert_is_available_negative(word_size) \
5069   assert(!vsn.is_available(word_size), \
5070          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5071          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5072          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5073 
5074   static void test_is_available_positive() {
5075     // Reserve some memory.
5076     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5077     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5078 
5079     // Commit some memory.
5080     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5081     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5082     assert(expanded, "Failed to commit");
5083 
5084     // Check that is_available accepts the committed size.
5085     assert_is_available_positive(commit_word_size);
5086 
5087     // Check that is_available accepts half the committed size.
5088     size_t expand_word_size = commit_word_size / 2;
5089     assert_is_available_positive(expand_word_size);
5090   }
5091 
5092   static void test_is_available_negative() {
5093     // Reserve some memory.
5094     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5095     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5096 
5097     // Commit some memory.
5098     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5099     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5100     assert(expanded, "Failed to commit");
5101 
5102     // Check that is_available doesn't accept a too large size.
5103     size_t two_times_commit_word_size = commit_word_size * 2;
5104     assert_is_available_negative(two_times_commit_word_size);
5105   }
5106 
5107   static void test_is_available_overflow() {
5108     // Reserve some memory.
5109     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5110     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5111 
5112     // Commit some memory.
5113     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5114     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5115     assert(expanded, "Failed to commit");
5116 
5117     // Calculate a size that will overflow the virtual space size.
5118     void* virtual_space_max = (void*)(uintptr_t)-1;
5119     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5120     size_t overflow_size = bottom_to_max + BytesPerWord;
5121     size_t overflow_word_size = overflow_size / BytesPerWord;
5122 
5123     // Check that is_available can handle the overflow.
5124     assert_is_available_negative(overflow_word_size);
5125   }
5126 
5127   static void test_is_available() {
5128     TestVirtualSpaceNodeTest::test_is_available_positive();
5129     TestVirtualSpaceNodeTest::test_is_available_negative();
5130     TestVirtualSpaceNodeTest::test_is_available_overflow();
5131   }
5132 };
5133 
5134 // The following test is placed here instead of a gtest / unittest file
5135 // because the ChunkManager class is only available in this file.
5136 void ChunkManager_test_list_index() {
5137   {
    // Test previous bug where a query for a humongous class metachunk
    // incorrectly matched the non-class medium metachunk size.
5140     {
5141       ChunkManager manager(true);
5142 
5143       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5144 
5145       ChunkIndex index = manager.list_index(MediumChunk);
5146 
5147       assert(index == HumongousIndex,
5148           "Requested size is larger than ClassMediumChunk,"
5149           " so should return HumongousIndex. Got index: %d", (int)index);
5150     }
5151 
5152     // Check the specified sizes as well.
5153     {
5154       ChunkManager manager(true);
5155       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5156       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5157       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5158       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5159     }
5160     {
5161       ChunkManager manager(false);
5162       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5163       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5164       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5165       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5166     }
5167 
5168   }
5169 
5170 }
5171 
5172 #endif // !PRODUCT
5173 
5174 #ifdef ASSERT
5175 
5176 // The following test is placed here instead of a gtest / unittest file
5177 // because the ChunkManager class is only available in this file.
5178 class SpaceManagerTest : AllStatic {
5179   friend void SpaceManager_test_adjust_initial_chunk_size();
5180 
5181   static void test_adjust_initial_chunk_size(bool is_class) {
5182     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5183     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5184     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5185 
5186 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5187     do {                                                                         \
5188       size_t v = value;                                                          \
5189       size_t e = expected;                                                       \
5190       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5191              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5192     } while (0)
5193 
5194     // Smallest (specialized)
5195     test_adjust_initial_chunk_size(1,            smallest, is_class);
5196     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5197     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5198 
5199     // Small
5200     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5201     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5202     test_adjust_initial_chunk_size(normal,       normal, is_class);
5203 
5204     // Medium
5205     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5206     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5207     test_adjust_initial_chunk_size(medium,     medium, is_class);
5208 
5209     // Humongous
5210     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5211 
5212 #undef test_adjust_initial_chunk_size
5213   }
5214 
5215   static void test_adjust_initial_chunk_size() {
5216     test_adjust_initial_chunk_size(false);
5217     test_adjust_initial_chunk_size(true);
5218   }
5219 };
5220 
5221 void SpaceManager_test_adjust_initial_chunk_size() {
5222   SpaceManagerTest::test_adjust_initial_chunk_size();
5223 }
5224 
5225 #endif // ASSERT
5226 
5227 struct chunkmanager_statistics_t {
5228   int num_specialized_chunks;
5229   int num_small_chunks;
5230   int num_medium_chunks;
5231   int num_humongous_chunks;
5232 };
5233 
5234 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5235   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5236   ChunkManagerStatistics stat;
5237   chunk_manager->collect_statistics(&stat);
5238   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
5239   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
5240   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
5241   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
5242 }
5243 
5244 struct chunk_geometry_t {
5245   size_t specialized_chunk_word_size;
5246   size_t small_chunk_word_size;
5247   size_t medium_chunk_word_size;
5248 };
5249 
5250 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5251   if (mdType == Metaspace::NonClassType) {
5252     out->specialized_chunk_word_size = SpecializedChunk;
5253     out->small_chunk_word_size = SmallChunk;
5254     out->medium_chunk_word_size = MediumChunk;
5255   } else {
5256     out->specialized_chunk_word_size = ClassSpecializedChunk;
5257     out->small_chunk_word_size = ClassSmallChunk;
5258     out->medium_chunk_word_size = ClassMediumChunk;
5259   }
5260 }