1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspace/metaspaceCommon.hpp"
  37 #include "memory/metaspace/metaspaceStatistics.hpp"
  38 #include "memory/metaspaceGCThresholdUpdater.hpp"
  39 #include "memory/metaspaceShared.hpp"
  40 #include "memory/metaspaceTracer.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "memory/universe.hpp"
  43 #include "runtime/atomic.hpp"
  44 #include "runtime/globals.hpp"
  45 #include "runtime/init.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/mutex.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "runtime/orderAccess.inline.hpp"
  50 #include "services/memTracker.hpp"
  51 #include "services/memoryService.hpp"
  52 #include "utilities/align.hpp"
  53 #include "utilities/copy.hpp"
  54 #include "utilities/debug.hpp"
  55 #include "utilities/globalDefinitions.hpp"
  56 #include "utilities/macros.hpp"
  57 
  58 using namespace metaspace::internals;
  59 
  60 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  61 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  62 
  63 // Helper function that performs a set of consistency checks on a chunk.
  64 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  65 
  66 // Given a Metachunk, update its in-use information (both in the
  67 // chunk and the occupancy map).
  68 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  69 
  70 size_t const allocation_from_dictionary_limit = 4 * K;
  71 
  72 MetaWord* last_allocated = 0;
  73 
  74 size_t Metaspace::_compressed_class_space_size;
  75 const MetaspaceTracer* Metaspace::_tracer = NULL;
  76 
  77 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  78 
  79 // Internal statistics.
  80 #ifdef ASSERT
  81 static struct {
  82   // Number of allocations.
  83   uintx num_allocs;
  84   // Number of times a ClassLoaderMetaspace was born...
  85   uintx num_metaspace_births;
  86   // ... and died.
  87   uintx num_metaspace_deaths;
  88   // Number of times VirtualSpaceListNodes were created...
  89   uintx num_vsnodes_created;
  90   // ... and purged.
  91   uintx num_vsnodes_purged;
  92   // Number of times we expanded the committed section of the space.
  93   uintx num_committed_space_expanded;
  94   // Number of deallocations
  95   uintx num_deallocs;
  96   // Number of deallocations triggered from outside ("real" deallocations).
  97   uintx num_external_deallocs;
  98   // Number of times an allocation was satisfied from deallocated blocks.
  99   uintx num_allocs_from_deallocated_blocks;
 100 } g_internal_statistics;
 101 #endif
 102 
 103 enum ChunkSizes {    // in words.
 104   ClassSpecializedChunk = 128,
 105   SpecializedChunk = 128,
 106   ClassSmallChunk = 256,
 107   SmallChunk = 512,
 108   ClassMediumChunk = 4 * K,
 109   MediumChunk = 8 * K
 110 };
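// Illustrative sketch (not used by the VM; assumes the common 64-bit build
// where BytesPerWord == 8): the sizes above are given in MetaWords, so they
// translate to bytes as follows:
//
//   size_t spec_bytes   = SpecializedChunk * BytesPerWord;   // 128 * 8  =  1 KB
//   size_t small_bytes  = SmallChunk       * BytesPerWord;   // 512 * 8  =  4 KB
//   size_t medium_bytes = MediumChunk      * BytesPerWord;   // 8*K * 8  = 64 KB
//
// Under the same assumption the class-space variants come out at 1 KB
// (ClassSpecializedChunk), 2 KB (ClassSmallChunk) and 32 KB (ClassMediumChunk).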
 111 
 112 // Returns size of this chunk type.
 113 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
 114   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
 115   size_t size = 0;
 116   if (is_class) {
 117     switch(chunktype) {
 118       case SpecializedIndex: size = ClassSpecializedChunk; break;
 119       case SmallIndex: size = ClassSmallChunk; break;
 120       case MediumIndex: size = ClassMediumChunk; break;
 121       default:
 122         ShouldNotReachHere();
 123     }
 124   } else {
 125     switch(chunktype) {
 126       case SpecializedIndex: size = SpecializedChunk; break;
 127       case SmallIndex: size = SmallChunk; break;
 128       case MediumIndex: size = MediumChunk; break;
 129       default:
 130         ShouldNotReachHere();
 131     }
 132   }
 133   return size;
 134 }
 135 
 136 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 137   if (is_class) {
 138     if (size == ClassSpecializedChunk) {
 139       return SpecializedIndex;
 140     } else if (size == ClassSmallChunk) {
 141       return SmallIndex;
 142     } else if (size == ClassMediumChunk) {
 143       return MediumIndex;
 144     } else if (size > ClassMediumChunk) {
 145       // A valid humongous chunk size is a multiple of the smallest chunk size.
 146       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 147       return HumongousIndex;
 148     }
 149   } else {
 150     if (size == SpecializedChunk) {
 151       return SpecializedIndex;
 152     } else if (size == SmallChunk) {
 153       return SmallIndex;
 154     } else if (size == MediumChunk) {
 155       return MediumIndex;
 156     } else if (size > MediumChunk) {
 157       // A valid humongous chunk size is a multiple of the smallest chunk size.
 158       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 159       return HumongousIndex;
 160     }
 161   }
 162   ShouldNotReachHere();
 163   return (ChunkIndex)-1;
 164 }
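// Illustrative usage sketch for the two helpers above (values taken from the
// ChunkSizes enum; the asserts are only meant as examples):
//
//   // Size and type mappings are inverses of each other for the fixed sizes:
//   assert(get_chunk_type_by_size(
//              get_size_for_nonhumongous_chunktype(SmallIndex, /*is_class*/ false),
//              /*is_class*/ false) == SmallIndex, "round trip");
//   // Any larger size that is a multiple of the smallest chunk size is humongous:
//   assert(get_chunk_type_by_size(16 * K, /*is_class*/ false) == HumongousIndex, "humongous");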
 165 
 166 ChunkIndex next_chunk_index(ChunkIndex i) {
 167   assert(i < NumberOfInUseLists, "Out of bounds");
 168   return (ChunkIndex) (i+1);
 169 }
 170 
 171 ChunkIndex prev_chunk_index(ChunkIndex i) {
 172   assert(i > ZeroIndex, "Out of bounds");
 173   return (ChunkIndex) (i-1);
 174 }
 175 
 176 static const char* space_type_name(Metaspace::MetaspaceType t) {
 177   const char* s = NULL;
 178   switch (t) {
 179     case Metaspace::StandardMetaspaceType: s = "Standard"; break;
 180     case Metaspace::BootMetaspaceType: s = "Boot"; break;
 181     case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
 182     case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
 183     default: ShouldNotReachHere();
 184   }
 185   return s;
 186 }
 187 
 188 volatile size_t MetaspaceGC::_capacity_until_GC = 0;
 189 uint MetaspaceGC::_shrink_factor = 0;
 190 bool MetaspaceGC::_should_concurrent_collect = false;
 191 
 192 
 193 typedef class FreeList<Metachunk> ChunkList;
 194 
 195 // Manages the global free lists of chunks.
 196 class ChunkManager : public CHeapObj<mtInternal> {
 197   friend class TestVirtualSpaceNodeTest;
 198 
 199   // Free list of chunks of different sizes.
 200   //   SpecializedChunk
 201   //   SmallChunk
 202   //   MediumChunk
 203   ChunkList _free_chunks[NumberOfFreeLists];
 204 
 205   // Whether or not this is the class chunkmanager.
 206   const bool _is_class;
 207 
 208   // Return non-humongous chunk list by its index.
 209   ChunkList* free_chunks(ChunkIndex index);
 210 
 211   // Returns non-humongous chunk list for the given chunk word size.
 212   ChunkList* find_free_chunks_list(size_t word_size);
 213 
 214   //   HumongousChunk
 215   ChunkTreeDictionary _humongous_dictionary;
 216 
 217   // Returns the humongous chunk dictionary.
 218   ChunkTreeDictionary* humongous_dictionary() {
 219     return &_humongous_dictionary;
 220   }
 221 
 222   // Size, in metaspace words, of all chunks managed by this ChunkManager
 223   size_t _free_chunks_total;
 224   // Number of chunks in this ChunkManager
 225   size_t _free_chunks_count;
 226 
 227   // Update counters after a chunk was added or removed.
 228   void account_for_added_chunk(const Metachunk* c);
 229   void account_for_removed_chunk(const Metachunk* c);
 230 
 231   // Debug support
 232 
 233   size_t sum_free_chunks();
 234   size_t sum_free_chunks_count();
 235 
 236   void locked_verify_free_chunks_total();
 237   void slow_locked_verify_free_chunks_total() {
 238     if (VerifyMetaspace) {
 239       locked_verify_free_chunks_total();
 240     }
 241   }
 242   void locked_verify_free_chunks_count();
 243   void slow_locked_verify_free_chunks_count() {
 244     if (VerifyMetaspace) {
 245       locked_verify_free_chunks_count();
 246     }
 247   }
 248 
 249   // Given a pointer to a chunk, attempts to merge it with neighboring
 250   // free chunks to form a bigger chunk. Returns true if successful.
 251   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 252 
 253   // Helper for chunk merging:
 254   //  Given an address range with 1-n chunks which are all supposed to be
 255   //  free and hence currently managed by this ChunkManager, remove them
 256   //  from this ChunkManager and mark them as invalid.
 257   // - This does not correct the occupancy map.
 258   // - This does not adjust the counters in ChunkManager.
 259   // - Does not adjust container count counter in containing VirtualSpaceNode.
 260   // Returns number of chunks removed.
 261   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 262 
 263   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 264   // split up the larger chunk into n smaller chunks, at least one of which should be
 265   // the target chunk of target chunk size. The smaller chunks, including the target
 266   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 267   // Note that this chunk is supposed to be removed from the freelist right away.
 268   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 269 
 270  public:
 271 
 272   ChunkManager(bool is_class)
 273       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 274     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 275     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 276     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 277   }
 278 
 279   // Remove a chunk of the given word size from the global freelist and return it.
 280   Metachunk* chunk_freelist_allocate(size_t word_size);
 281 
 282   // Map a size to a list index assuming that there are lists
 283   // for special, small, medium, and humongous chunks.
 284   ChunkIndex list_index(size_t size);
 285 
 286   // Map a given index to the chunk size.
 287   size_t size_by_index(ChunkIndex index) const;
 288 
 289   bool is_class() const { return _is_class; }
 290 
 291   // Convenience accessors.
 292   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 293   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 294   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 295 
 296   // Take a chunk from the ChunkManager. The chunk is expected to be in
 297   // the chunk manager (the freelist if non-humongous, the dictionary if
 298   // humongous).
 299   void remove_chunk(Metachunk* chunk);
 300 
 301   // Return a single chunk of type index to the ChunkManager.
 302   void return_single_chunk(Metachunk* chunk);
 303 
 304   // Add the simple linked list of chunks to the freelist of chunks
 305   // of type index.
 306   void return_chunk_list(Metachunk* chunks);
 307 
 308   // Total of the space in the free chunks list
 309   size_t free_chunks_total_words();
 310   size_t free_chunks_total_bytes();
 311 
 312   // Number of chunks in the free chunks list
 313   size_t free_chunks_count();
 314 
 315   // Remove from a list by size.  Selects list based on size of chunk.
 316   Metachunk* free_chunks_get(size_t chunk_word_size);
 317 
 318 #define index_bounds_check(index)                                         \
 319   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 320 
 321   size_t num_free_chunks(ChunkIndex index) const {
 322     index_bounds_check(index);
 323 
 324     if (index == HumongousIndex) {
 325       return _humongous_dictionary.total_free_blocks();
 326     }
 327 
 328     ssize_t count = _free_chunks[index].count();
 329     return count == -1 ? 0 : (size_t) count;
 330   }
 331 
 332   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 333     index_bounds_check(index);
 334 
 335     size_t word_size = 0;
 336     if (index == HumongousIndex) {
 337       word_size = _humongous_dictionary.total_size();
 338     } else {
 339       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 340       word_size = size_per_chunk_in_words * num_free_chunks(index);
 341     }
 342 
 343     return word_size * BytesPerWord;
 344   }
 345 
 346   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 347     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 348                                          num_free_chunks(SmallIndex),
 349                                          num_free_chunks(MediumIndex),
 350                                          num_free_chunks(HumongousIndex),
 351                                          size_free_chunks_in_bytes(SpecializedIndex),
 352                                          size_free_chunks_in_bytes(SmallIndex),
 353                                          size_free_chunks_in_bytes(MediumIndex),
 354                                          size_free_chunks_in_bytes(HumongousIndex));
 355   }
 356 
 357   // Debug support
 358   void verify();
 359   void slow_verify() {
 360     if (VerifyMetaspace) {
 361       verify();
 362     }
 363   }
 364   void locked_verify();
 365   void slow_locked_verify() {
 366     if (VerifyMetaspace) {
 367       locked_verify();
 368     }
 369   }
 370 
 371   void locked_print_free_chunks(outputStream* st);
 372   void locked_print_sum_free_chunks(outputStream* st);
 373 
 374   // Fill in current statistic values to the given statistics object.
 375   void collect_statistics(ChunkManagerStatistics* out) const;
 376 
 377 };
 378 
 379 class SmallBlocks : public CHeapObj<mtClass> {
 380   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 381   // Note: this corresponds to the imposed minimum allocation size, see SpaceManager::get_allocation_word_size()
 382   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 383 
 384  private:
 385   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 386 
 387   FreeList<Metablock>& list_at(size_t word_size) {
 388     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 389     return _small_lists[word_size - _small_block_min_size];
 390   }
 391 
 392  public:
 393   SmallBlocks() {
 394     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 395       uint k = i - _small_block_min_size;
 396       _small_lists[k].set_size(i);
 397     }
 398   }
 399 
 400   // Returns the total size, in words, of all blocks, across all block sizes.
 401   size_t total_size() const {
 402     size_t result = 0;
 403     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 404       uint k = i - _small_block_min_size;
 405       result = result + _small_lists[k].count() * _small_lists[k].size();
 406     }
 407     return result;
 408   }
 409 
 410   // Returns the total number of all blocks across all block sizes.
 411   uintx total_num_blocks() const {
 412     uintx result = 0;
 413     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 414       uint k = i - _small_block_min_size;
 415       result = result + _small_lists[k].count();
 416     }
 417     return result;
 418   }
 419 
 420   static uint small_block_max_size() { return _small_block_max_size; }
 421   static uint small_block_min_size() { return _small_block_min_size; }
 422 
 423   MetaWord* get_block(size_t word_size) {
 424     if (list_at(word_size).count() > 0) {
 425       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 426       return new_block;
 427     } else {
 428       return NULL;
 429     }
 430   }
 431   void return_block(Metablock* free_chunk, size_t word_size) {
 432     list_at(word_size).return_chunk_at_head(free_chunk, false);
 433     assert(list_at(word_size).count() > 0, "Should have a chunk");
 434   }
 435 
 436   void print_on(outputStream* st) const {
 437     st->print_cr("SmallBlocks:");
 438     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 439       uint k = i - _small_block_min_size;
 440       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 441     }
 442   }
 443 };
 444 
 445 // Used to manage the free list of Metablocks (a block corresponds
 446 // to the allocation of a quantum of metadata).
 447 class BlockFreelist : public CHeapObj<mtClass> {
 448   BlockTreeDictionary* const _dictionary;
 449   SmallBlocks* _small_blocks;
 450 
 451   // Only allocate and split from freelist if the size of the allocation
 452   // is at least 1/4th the size of the available block.
 453   const static int WasteMultiplier = 4;
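  // Illustrative sketch of the rule above (a sketch with example numbers and
  // illustrative variable names, not the actual implementation): with
  // WasteMultiplier == 4, a request of 100 words may be satisfied by splitting
  // a free block of up to 400 words, while a 500-word free block would be left
  // untouched:
  //
  //   bool may_split = free_block_word_size <= requested_word_size * WasteMultiplier;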
 454 
 455   // Accessors
 456   BlockTreeDictionary* dictionary() const { return _dictionary; }
 457   SmallBlocks* small_blocks() {
 458     if (_small_blocks == NULL) {
 459       _small_blocks = new SmallBlocks();
 460     }
 461     return _small_blocks;
 462   }
 463 
 464  public:
 465   BlockFreelist();
 466   ~BlockFreelist();
 467 
 468   // Get and return a block to the free list
 469   MetaWord* get_block(size_t word_size);
 470   void return_block(MetaWord* p, size_t word_size);
 471 
 472   // Returns the total size, in words, of all blocks kept in this structure.
 473   size_t total_size() const  {
 474     size_t result = dictionary()->total_size();
 475     if (_small_blocks != NULL) {
 476       result = result + _small_blocks->total_size();
 477     }
 478     return result;
 479   }
 480 
 481   // Returns the number of all blocks kept in this structure.
 482   uintx num_blocks() const {
 483     uintx result = dictionary()->total_free_blocks();
 484     if (_small_blocks != NULL) {
 485       result = result + _small_blocks->total_num_blocks();
 486     }
 487     return result;
 488   }
 489 
 490   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 491   void print_on(outputStream* st) const;
 492 };
 493 
 494 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant.
 495 template <typename T> struct all_ones  { static const T value; };
 496 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 497 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 498 
 499 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 500 // keeps information about
 501 // - where a chunk starts
 502 // - whether a chunk is in-use or free
 503 // A bit in this bitmap represents one range of memory in the smallest
 504 // chunk size (SpecializedChunk or ClassSpecializedChunk).
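// Worked sizing example (a sketch; the node size is chosen purely for the
// example): for a non-class node covering 256 * K words, with
// SpecializedChunk == 128 words as the smallest chunk size, the map needs
//
//   num_bits  = (256 * K) / 128  ==  2048 bits per layer
//   map_bytes = (2048 + 7) / 8   ==   256 bytes per layer
//
// i.e. roughly half a KB of bitmap for both layers together.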
 505 class OccupancyMap : public CHeapObj<mtInternal> {
 506 
 507   // The address range this map covers.
 508   const MetaWord* const _reference_address;
 509   const size_t _word_size;
 510 
 511   // The word size of a specialized chunk, aka the number of words one
 512   // bit in this map represents.
 513   const size_t _smallest_chunk_word_size;
 514 
 515   // map data
 516   // Data are organized in two bit layers:
 517   // The first layer is the chunk-start-map. Here, a bit is set to mark
 518   // the corresponding region as the head of a chunk.
 519   // The second layer is the in-use-map. Here, a set bit indicates that
 520   // the corresponding region belongs to a chunk which is in use.
 521   uint8_t* _map[2];
 522 
 523   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 524 
 525   // length, in bytes, of bitmap data
 526   size_t _map_size;
 527 
 528   // Returns true if bit at position pos at bit-layer layer is set.
 529   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 530     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 531     const unsigned byteoffset = pos / 8;
 532     assert(byteoffset < _map_size,
 533            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 534     const unsigned mask = 1 << (pos % 8);
 535     return (_map[layer][byteoffset] & mask) > 0;
 536   }
 537 
 538   // Changes bit at position pos at bit-layer layer to value v.
 539   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 540     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 541     const unsigned byteoffset = pos / 8;
 542     assert(byteoffset < _map_size,
 543            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 544     const unsigned mask = 1 << (pos % 8);
 545     if (v) {
 546       _map[layer][byteoffset] |= mask;
 547     } else {
 548       _map[layer][byteoffset] &= ~mask;
 549     }
 550   }
 551 
 552   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 553   // pos is 32/64 aligned and num_bits is 32/64.
 554   // This is the typical case when coalescing to medium chunks, whose size is
 555   // 32 or 64 times the specialized chunk size (depending on class or non class
 556   // case), so they occupy 32 or 64 bits, which should be 32-/64-bit aligned, because
 557   // chunks are chunk-size aligned.
 558   template <typename T>
 559   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 560     assert(_map_size > 0, "not initialized");
 561     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 562     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 563     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 564     const size_t byteoffset = pos / 8;
 565     assert(byteoffset <= (_map_size - sizeof(T)),
 566            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 567     const T w = *(T*)(_map[layer] + byteoffset);
 568     return w > 0 ? true : false;
 569   }
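  // Worked example for the fast path above (using the chunk sizes from this
  // file): a non-class MediumChunk covers 8 * K / SpecializedChunk == 64 bit
  // positions, i.e. exactly one uint64_t; a ClassMediumChunk covers
  // 4 * K / ClassSpecializedChunk == 32 positions, i.e. one uint32_t. Since
  // chunks are chunk-size aligned, the corresponding call is e.g.
  //
  //   is_any_bit_set_in_region_3264<uint64_t>(pos, 64, layer_in_use_map);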
 570 
 571   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 572   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 573     if (pos % 32 == 0 && num_bits == 32) {
 574       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 575     } else if (pos % 64 == 0 && num_bits == 64) {
 576       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 577     } else {
 578       for (unsigned n = 0; n < num_bits; n ++) {
 579         if (get_bit_at_position(pos + n, layer)) {
 580           return true;
 581         }
 582       }
 583     }
 584     return false;
 585   }
 586 
 587   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 588   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 589     assert(word_size % _smallest_chunk_word_size == 0,
 590         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 591     const unsigned pos = get_bitpos_for_address(p);
 592     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 593     return is_any_bit_set_in_region(pos, num_bits, layer);
 594   }
 595 
 596   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 597   // pos is 32/64 aligned and num_bits is 32/64.
 598   // This is the typical case when coalescing to medium chunks, whose size
 599   // is 32 or 64 times the specialized chunk size (depending on class or non
 600   // class case), so they occupy 32 or 64 bits, which should be 32-/64-bit aligned,
 601   // because chunks are chunk-size aligned.
 602   template <typename T>
 603   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 604     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 605            (unsigned)(sizeof(T) * 8), pos);
 606     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 607            num_bits, (unsigned)(sizeof(T) * 8));
 608     const size_t byteoffset = pos / 8;
 609     assert(byteoffset <= (_map_size - sizeof(T)),
 610            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 611     T* const pw = (T*)(_map[layer] + byteoffset);
 612     *pw = v ? all_ones<T>::value : (T) 0;
 613   }
 614 
 615   // Set all bits in a region starting at pos to a value.
 616   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 617     assert(_map_size > 0, "not initialized");
 618     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 619     if (pos % 32 == 0 && num_bits == 32) {
 620       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 621     } else if (pos % 64 == 0 && num_bits == 64) {
 622       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 623     } else {
 624       for (unsigned n = 0; n < num_bits; n ++) {
 625         set_bit_at_position(pos + n, layer, v);
 626       }
 627     }
 628   }
 629 
 630   // Helper: sets all bits in a region [p, p+word_size).
 631   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 632     assert(word_size % _smallest_chunk_word_size == 0,
 633         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 634     const unsigned pos = get_bitpos_for_address(p);
 635     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 636     set_bits_of_region(pos, num_bits, layer, v);
 637   }
 638 
 639   // Helper: given an address, return the bit position representing that address.
 640   unsigned get_bitpos_for_address(const MetaWord* p) const {
 641     assert(_reference_address != NULL, "not initialized");
 642     assert(p >= _reference_address && p < _reference_address + _word_size,
 643            "Address %p out of range for occupancy map [%p..%p).",
 644             p, _reference_address, _reference_address + _word_size);
 645     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 646            "Address not aligned (%p).", p);
 647     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 648     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 649     return (unsigned) d;
 650   }
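  // Illustrative example (non-class sizes assumed): with
  // _smallest_chunk_word_size == SpecializedChunk == 128, an address lying
  // 1024 words past _reference_address maps to bit 1024 / 128 == 8; the same
  // position indexes both bit layers:
  //
  //   unsigned pos = get_bitpos_for_address(_reference_address + 1024);  // == 8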
 651 
 652  public:
 653 
 654   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 655     _reference_address(reference_address), _word_size(word_size),
 656     _smallest_chunk_word_size(smallest_chunk_word_size) {
 657     assert(reference_address != NULL, "invalid reference address");
 658     assert(is_aligned(reference_address, smallest_chunk_word_size),
 659            "Reference address not aligned to smallest chunk size.");
 660     assert(is_aligned(word_size, smallest_chunk_word_size),
 661            "Word_size shall be a multiple of the smallest chunk size.");
 662     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 663     size_t num_bits = word_size / smallest_chunk_word_size;
 664     _map_size = (num_bits + 7) / 8;
 665     assert(_map_size * 8 >= num_bits, "sanity");
 666     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 667     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 668     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 669     memset(_map[1], 0, _map_size);
 670     memset(_map[0], 0, _map_size);
 671     // Sanity test: the first respectively last possible chunk start address in
 672     // the covered range shall map to the first and last bit in the bitmap.
 673     assert(get_bitpos_for_address(reference_address) == 0,
 674       "First chunk address in range must map to first bit in bitmap.");
 675     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 676       "Last chunk address in range must map to last bit in bitmap.");
 677   }
 678 
 679   ~OccupancyMap() {
 680     os::free(_map[0]);
 681     os::free(_map[1]);
 682   }
 683 
 684   // Returns true if at address x a chunk is starting.
 685   bool chunk_starts_at_address(MetaWord* p) const {
 686     const unsigned pos = get_bitpos_for_address(p);
 687     return get_bit_at_position(pos, layer_chunk_start_map);
 688   }
 689 
 690   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 691     const unsigned pos = get_bitpos_for_address(p);
 692     set_bit_at_position(pos, layer_chunk_start_map, v);
 693   }
 694 
 695   // Removes all chunk-start-bits inside a region, typically as a
 696   // result of a chunk merge.
 697   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 698     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 699   }
 700 
 701   // Returns true if there are live (in use) chunks in the region limited
 702   // by [p, p+word_size).
 703   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 704     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 705   }
 706 
 707   // Marks the region starting at p with the size word_size as in use
 708   // or free, depending on v.
 709   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 710     set_bits_of_region(p, word_size, layer_in_use_map, v);
 711   }
 712 
 713 #ifdef ASSERT
 714   // Verify occupancy map for the address range [from, to).
 715   // We need to tell it the address range, because the memory the
 716   // occupancy map is covering may not be fully committed yet.
 717   void verify(MetaWord* from, MetaWord* to) {
 718     Metachunk* chunk = NULL;
 719     int nth_bit_for_chunk = 0;
 720     MetaWord* chunk_end = NULL;
 721     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 722       const unsigned pos = get_bitpos_for_address(p);
 723       // Check the chunk-starts-info:
 724       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 725         // Chunk start marked in bitmap.
 726         chunk = (Metachunk*) p;
 727         if (chunk_end != NULL) {
 728           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 729                  "the next chunk to start at %p).", p, chunk_end);
 730         }
 731         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 732         if (chunk->get_chunk_type() != HumongousIndex) {
 733           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 734         }
 735         chunk_end = p + chunk->word_size();
 736         nth_bit_for_chunk = 0;
 737         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 738       } else {
 739         // No chunk start marked in bitmap.
 740         assert(chunk != NULL, "Chunk should start at start of address range.");
 741         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 742         nth_bit_for_chunk ++;
 743       }
 744       // Check the in-use-info:
 745       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 746       if (in_use_bit) {
 747         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 748                chunk, nth_bit_for_chunk);
 749       } else {
 750         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 751                chunk, nth_bit_for_chunk);
 752       }
 753     }
 754   }
 755 
 756   // Verify that a given chunk is correctly accounted for in the bitmap.
 757   void verify_for_chunk(Metachunk* chunk) {
 758     assert(chunk_starts_at_address((MetaWord*) chunk),
 759            "No chunk start marked in map for chunk %p.", chunk);
 760     // For chunks larger than the minimal chunk size, no other chunk
 761     // must start in its area.
 762     if (chunk->word_size() > _smallest_chunk_word_size) {
 763       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 764                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 765              "No chunk must start within another chunk.");
 766     }
 767     if (!chunk->is_tagged_free()) {
 768       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 769              "Chunk %p is in use but marked as free in map (%d %d).",
 770              chunk, chunk->get_chunk_type(), chunk->get_origin());
 771     } else {
 772       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 773              "Chunk %p is free but marked as in-use in map (%d %d).",
 774              chunk, chunk->get_chunk_type(), chunk->get_origin());
 775     }
 776   }
 777 
 778 #endif // ASSERT
 779 
 780 };
 781 
 782 // A VirtualSpaceList node.
 783 class VirtualSpaceNode : public CHeapObj<mtClass> {
 784   friend class VirtualSpaceList;
 785 
 786   // Link to next VirtualSpaceNode
 787   VirtualSpaceNode* _next;
 788 
 789   // Whether this node is contained in class or metaspace.
 790   const bool _is_class;
 791 
 792   // total in the VirtualSpace
 793   MemRegion _reserved;
 794   ReservedSpace _rs;
 795   VirtualSpace _virtual_space;
 796   MetaWord* _top;
 797   // count of chunks contained in this VirtualSpace
 798   uintx _container_count;
 799 
 800   OccupancyMap* _occupancy_map;
 801 
 802   // Convenience functions to access the _virtual_space
 803   char* low()  const { return virtual_space()->low(); }
 804   char* high() const { return virtual_space()->high(); }
 805 
 806   // The first Metachunk will be allocated at the bottom of the
 807   // VirtualSpace
 808   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 809 
 810   // Committed but unused space in the virtual space
 811   size_t free_words_in_vs() const;
 812 
 813   // True if this node belongs to class metaspace.
 814   bool is_class() const { return _is_class; }
 815 
 816   // Helper function for take_from_committed: allocate padding chunks
 817   // until top is at the given address.
 818   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 819 
 820  public:
 821 
 822   VirtualSpaceNode(bool is_class, size_t byte_size);
 823   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 824     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 825   ~VirtualSpaceNode();
 826 
 827   // Convenience functions for logical bottom and end
 828   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 829   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 830 
 831   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 832   OccupancyMap* occupancy_map() { return _occupancy_map; }
 833 
 834   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 835 
 836   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 837   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 838 
 839   bool is_pre_committed() const { return _virtual_space.special(); }
 840 
 841   // address of next available space in _virtual_space;
 842   // Accessors
 843   VirtualSpaceNode* next() { return _next; }
 844   void set_next(VirtualSpaceNode* v) { _next = v; }
 845 
 846   void set_reserved(MemRegion const v) { _reserved = v; }
 847   void set_top(MetaWord* v) { _top = v; }
 848 
 849   // Accessors
 850   MemRegion* reserved() { return &_reserved; }
 851   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 852 
 853   // Returns true if "word_size" is available in the VirtualSpace
 854   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 855 
 856   MetaWord* top() const { return _top; }
 857   void inc_top(size_t word_size) { _top += word_size; }
 858 
 859   uintx container_count() { return _container_count; }
 860   void inc_container_count();
 861   void dec_container_count();
 862 #ifdef ASSERT
 863   uintx container_count_slow();
 864   void verify_container_count();
 865 #endif
 866 
 867   // used and capacity in this single entry in the list
 868   size_t used_words_in_vs() const;
 869   size_t capacity_words_in_vs() const;
 870 
 871   bool initialize();
 872 
 873   // get space from the virtual space
 874   Metachunk* take_from_committed(size_t chunk_word_size);
 875 
 876   // Allocate a chunk from the virtual space and return it.
 877   Metachunk* get_chunk_vs(size_t chunk_word_size);
 878 
 879   // Expands/shrinks the committed space in a virtual space.  Delegates
 880   // to VirtualSpace.
 881   bool expand_by(size_t min_words, size_t preferred_words);
 882 
 883   // In preparation for deleting this node, remove all the chunks
 884   // in the node from any freelist.
 885   void purge(ChunkManager* chunk_manager);
 886 
 887   // If an allocation doesn't fit in the current node a new node is created.
 888   // Allocate chunks out of the remaining committed space in this node
 889   // to avoid wasting that memory.
 890   // This always adds up because all the chunk sizes are multiples of
 891   // the smallest chunk size.
 892   void retire(ChunkManager* chunk_manager);
 893 
 894 
 895   void print_on(outputStream* st) const                 { print_on(st, K); }
 896   void print_on(outputStream* st, size_t scale) const;
 897   void print_map(outputStream* st, bool is_class) const;
 898 
 899   // Debug support
 900   DEBUG_ONLY(void mangle();)
 901   // Verify counters, all chunks in this list node and the occupancy map.
 902   DEBUG_ONLY(void verify();)
 903   // Verify that all free chunks in this node are ideally merged
 904   // (there should not be multiple small chunks where a large chunk could exist).
 905   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 906 
 907 };
 908 
 909 #define assert_is_aligned(value, alignment)                  \
 910   assert(is_aligned((value), (alignment)),                   \
 911          SIZE_FORMAT_HEX " is not aligned to "               \
 912          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 913 
 914 #define assert_counter(expected_value, real_value, msg) \
 915   assert( (expected_value) == (real_value),             \
 916          "Counter mismatch (%s): expected " SIZE_FORMAT \
 917          ", but got: " SIZE_FORMAT ".", msg, expected_value, \
 918          real_value);
 919 
 920 // Decide if large pages should be committed when the memory is reserved.
 921 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 922   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 923     size_t words = bytes / BytesPerWord;
 924     bool is_class = false; // We never reserve large pages for the class space.
 925     if (MetaspaceGC::can_expand(words, is_class) &&
 926         MetaspaceGC::allowed_expansion() >= words) {
 927       return true;
 928     }
 929   }
 930 
 931   return false;
 932 }
 933 
 934 // "bytes" is the size of the associated VirtualSpace.
 935 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 936   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 937   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 938   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 939   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 940 
 941   if (_rs.is_reserved()) {
 942     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 943     assert(_rs.size() != 0, "Catch if we get a 0 size");
 944     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 945     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 946 
 947     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 948   }
 949 }
 950 
 951 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 952   DEBUG_ONLY(this->verify();)
 953   Metachunk* chunk = first_chunk();
 954   Metachunk* invalid_chunk = (Metachunk*) top();
 955   while (chunk < invalid_chunk ) {
 956     assert(chunk->is_tagged_free(), "Should be tagged free");
 957     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 958     chunk_manager->remove_chunk(chunk);
 959     chunk->remove_sentinel();
 960     assert(chunk->next() == NULL &&
 961            chunk->prev() == NULL,
 962            "Was not removed from its list");
 963     chunk = (Metachunk*) next;
 964   }
 965 }
 966 
 967 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 968 
 969   if (bottom() == top()) {
 970     return;
 971   }
 972 
 973   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 974   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 975   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 976 
 977   int line_len = 100;
 978   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 979   line_len = (int)(section_len / spec_chunk_size);
 980 
 981   static const int NUM_LINES = 4;
 982 
 983   char* lines[NUM_LINES];
 984   for (int i = 0; i < NUM_LINES; i ++) {
 985     lines[i] = (char*)os::malloc(line_len, mtInternal);
 986   }
 987   int pos = 0;
 988   const MetaWord* p = bottom();
 989   const Metachunk* chunk = (const Metachunk*)p;
 990   const MetaWord* chunk_end = p + chunk->word_size();
 991   while (p < top()) {
 992     if (pos == line_len) {
 993       pos = 0;
 994       for (int i = 0; i < NUM_LINES; i ++) {
 995         st->fill_to(22);
 996         st->print_raw(lines[i], line_len);
 997         st->cr();
 998       }
 999     }
1000     if (pos == 0) {
1001       st->print(PTR_FORMAT ":", p2i(p));
1002     }
1003     if (p == chunk_end) {
1004       chunk = (Metachunk*)p;
1005       chunk_end = p + chunk->word_size();
1006     }
1007     // Line 1: chunk starting points (a dot if that area is a chunk start).
1008     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
1009 
1010     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
1011     // chunk is in use.
1012     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
1013     if (chunk->word_size() == spec_chunk_size) {
1014       lines[1][pos] = chunk_is_free ? 'x' : 'X';
1015     } else if (chunk->word_size() == small_chunk_size) {
1016       lines[1][pos] = chunk_is_free ? 's' : 'S';
1017     } else if (chunk->word_size() == med_chunk_size) {
1018       lines[1][pos] = chunk_is_free ? 'm' : 'M';
1019     } else if (chunk->word_size() > med_chunk_size) {
1020       lines[1][pos] = chunk_is_free ? 'h' : 'H';
1021     } else {
1022       ShouldNotReachHere();
1023     }
1024 
1025     // Line 3: chunk origin
1026     const ChunkOrigin origin = chunk->get_origin();
1027     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
1028 
1029     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
1030     //         but were never used.
1031     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
1032 
1033     p += spec_chunk_size;
1034     pos ++;
1035   }
1036   if (pos > 0) {
1037     for (int i = 0; i < NUM_LINES; i ++) {
1038       st->fill_to(22);
1039       st->print_raw(lines[i], line_len);
1040       st->cr();
1041     }
1042   }
1043   for (int i = 0; i < NUM_LINES; i ++) {
1044     os::free(lines[i]);
1045   }
1046 }
1047 
1048 
1049 #ifdef ASSERT
1050 uintx VirtualSpaceNode::container_count_slow() {
1051   uintx count = 0;
1052   Metachunk* chunk = first_chunk();
1053   Metachunk* invalid_chunk = (Metachunk*) top();
1054   while (chunk < invalid_chunk ) {
1055     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1056     do_verify_chunk(chunk);
1057     // Don't count the chunks on the free lists.  Those are
1058     // still part of the VirtualSpaceNode but not currently
1059     // counted.
1060     if (!chunk->is_tagged_free()) {
1061       count++;
1062     }
1063     chunk = (Metachunk*) next;
1064   }
1065   return count;
1066 }
1067 #endif
1068 
1069 #ifdef ASSERT
1070 // Verify counters, all chunks in this list node and the occupancy map.
1071 void VirtualSpaceNode::verify() {
1072   uintx num_in_use_chunks = 0;
1073   Metachunk* chunk = first_chunk();
1074   Metachunk* invalid_chunk = (Metachunk*) top();
1075 
1076   // Iterate the chunks in this node and verify each chunk.
1077   while (chunk < invalid_chunk ) {
1078     DEBUG_ONLY(do_verify_chunk(chunk);)
1079     if (!chunk->is_tagged_free()) {
1080       num_in_use_chunks ++;
1081     }
1082     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1083     chunk = (Metachunk*) next;
1084   }
1085   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1086          ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
1087   // Also verify the occupancy map.
1088   occupancy_map()->verify(this->bottom(), this->top());
1089 }
1090 #endif // ASSERT
1091 
1092 #ifdef ASSERT
1093 // Verify that all free chunks in this node are ideally merged
1094 // (there should not be multiple small chunks where a large chunk could exist).
1095 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1096   Metachunk* chunk = first_chunk();
1097   Metachunk* invalid_chunk = (Metachunk*) top();
1098   // Shorthands.
1099   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1100   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1101   int num_free_chunks_since_last_med_boundary = -1;
1102   int num_free_chunks_since_last_small_boundary = -1;
1103   while (chunk < invalid_chunk ) {
1104     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1105     // Reset the counter when encountering a non-free chunk.
1106     if (chunk->get_chunk_type() != HumongousIndex) {
1107       if (chunk->is_tagged_free()) {
1108         // Count successive free, non-humongous chunks.
1109         if (is_aligned(chunk, size_small)) {
1110           assert(num_free_chunks_since_last_small_boundary <= 1,
1111                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1112           num_free_chunks_since_last_small_boundary = 0;
1113         } else if (num_free_chunks_since_last_small_boundary != -1) {
1114           num_free_chunks_since_last_small_boundary ++;
1115         }
1116         if (is_aligned(chunk, size_med)) {
1117           assert(num_free_chunks_since_last_med_boundary <= 1,
1118                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1119           num_free_chunks_since_last_med_boundary = 0;
1120         } else if (num_free_chunks_since_last_med_boundary != -1) {
1121           num_free_chunks_since_last_med_boundary ++;
1122         }
1123       } else {
1124         // Encountering a non-free chunk, reset counters.
1125         num_free_chunks_since_last_med_boundary = -1;
1126         num_free_chunks_since_last_small_boundary = -1;
1127       }
1128     } else {
1129       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1130       num_free_chunks_since_last_med_boundary = -1;
1131       num_free_chunks_since_last_small_boundary = -1;
1132     }
1133 
1134     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1135     chunk = (Metachunk*) next;
1136   }
1137 }
1138 #endif // ASSERT
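// Worked example for the check above (a sketch, non-class sizes): if a
// SmallChunk-aligned area of 512 words consists entirely of free 128-word
// SpecializedChunk chunks, those chunks could have been merged into a single
// free SmallChunk. In that case
//
//   num_free_chunks_since_last_small_boundary reaches 512 / 128 - 1 == 3 > 1
//
// and the assert flags the missed merge opportunity the next time a free
// chunk is encountered on a boundary.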
1139 
1140 // List of VirtualSpaces for metadata allocation.
1141 class VirtualSpaceList : public CHeapObj<mtClass> {
1142   friend class VirtualSpaceNode;
1143 
1144   enum VirtualSpaceSizes {
1145     VirtualSpaceSize = 256 * K
1146   };
1147 
1148   // Head of the list
1149   VirtualSpaceNode* _virtual_space_list;
1150   // virtual space currently being used for allocations
1151   VirtualSpaceNode* _current_virtual_space;
1152 
1153   // Is this VirtualSpaceList used for the compressed class space
1154   bool _is_class;
1155 
1156   // Sum of reserved and committed memory in the virtual spaces
1157   size_t _reserved_words;
1158   size_t _committed_words;
1159 
1160   // Number of virtual spaces
1161   size_t _virtual_space_count;
1162 
1163   ~VirtualSpaceList();
1164 
1165   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1166 
1167   void set_virtual_space_list(VirtualSpaceNode* v) {
1168     _virtual_space_list = v;
1169   }
1170   void set_current_virtual_space(VirtualSpaceNode* v) {
1171     _current_virtual_space = v;
1172   }
1173 
1174   void link_vs(VirtualSpaceNode* new_entry);
1175 
1176   // Get another virtual space and add it to the list.  This
1177   // is typically prompted by a failed attempt to allocate a chunk
1178   // and is typically followed by the allocation of a chunk.
1179   bool create_new_virtual_space(size_t vs_word_size);
1180 
1181   // Chunk up the unused committed space in the current
1182   // virtual space and add the chunks to the free list.
1183   void retire_current_virtual_space();
1184 
1185  public:
1186   VirtualSpaceList(size_t word_size);
1187   VirtualSpaceList(ReservedSpace rs);
1188 
1189   size_t free_bytes();
1190 
1191   Metachunk* get_new_chunk(size_t chunk_word_size,
1192                            size_t suggested_commit_granularity);
1193 
1194   bool expand_node_by(VirtualSpaceNode* node,
1195                       size_t min_words,
1196                       size_t preferred_words);
1197 
1198   bool expand_by(size_t min_words,
1199                  size_t preferred_words);
1200 
1201   VirtualSpaceNode* current_virtual_space() {
1202     return _current_virtual_space;
1203   }
1204 
1205   bool is_class() const { return _is_class; }
1206 
1207   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1208 
1209   size_t reserved_words()  { return _reserved_words; }
1210   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1211   size_t committed_words() { return _committed_words; }
1212   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1213 
1214   void inc_reserved_words(size_t v);
1215   void dec_reserved_words(size_t v);
1216   void inc_committed_words(size_t v);
1217   void dec_committed_words(size_t v);
1218   void inc_virtual_space_count();
1219   void dec_virtual_space_count();
1220 
1221   bool contains(const void* ptr);
1222 
1223   // Unlink empty VirtualSpaceNodes and free them.
1224   void purge(ChunkManager* chunk_manager);
1225 
1226   void print_on(outputStream* st) const                 { print_on(st, K); }
1227   void print_on(outputStream* st, size_t scale) const;
1228   void print_map(outputStream* st) const;
1229 
1230   class VirtualSpaceListIterator : public StackObj {
1231     VirtualSpaceNode* _virtual_spaces;
1232    public:
1233     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1234       _virtual_spaces(virtual_spaces) {}
1235 
1236     bool repeat() {
1237       return _virtual_spaces != NULL;
1238     }
1239 
1240     VirtualSpaceNode* get_next() {
1241       VirtualSpaceNode* result = _virtual_spaces;
1242       if (_virtual_spaces != NULL) {
1243         _virtual_spaces = _virtual_spaces->next();
1244       }
1245       return result;
1246     }
1247   };
1248 };
1249 
1250 class Metadebug : AllStatic {
1251   // Debugging support for Metaspaces
1252   static int _allocation_fail_alot_count;
1253 
1254  public:
1255 
1256   static void init_allocation_fail_alot_count();
1257 #ifdef ASSERT
1258   static bool test_metadata_failure();
1259 #endif
1260 };
1261 
1262 int Metadebug::_allocation_fail_alot_count = 0;
1263 
1264 
1265 //  SpaceManager - used by Metaspace to handle allocations
1266 class SpaceManager : public CHeapObj<mtClass> {
1267   friend class ClassLoaderMetaspace;
1268   friend class Metadebug;
1269 
1270  private:
1271 
1272   // protects allocations
1273   Mutex* const _lock;
1274 
1275   // Type of metadata allocated.
1276   const Metaspace::MetadataType   _mdtype;
1277 
1278   // Type of metaspace
1279   const Metaspace::MetaspaceType  _space_type;
1280 
1281   // List of chunks in use by this SpaceManager.  Allocations
1282   // are done from the current chunk.  The list is used for deallocating
1283   // chunks when the SpaceManager is freed.
1284   Metachunk* _chunk_list;
1285   Metachunk* _current_chunk;
1286 
1287   // Maximum number of small chunks to allocate to a SpaceManager
1288   static uint const _small_chunk_limit;
1289 
1290   // Maximum number of specialize chunks to allocate for anonymous and delegating
1291   // metadata space to a SpaceManager
1292   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1293 
1294   // Some running counters, but let's keep their number small so as not to add too
1295   // much to the per-classloader footprint.
1296   // Note: capacity = used + free + waste + overhead. We do not keep running counters for
1297   // free and waste. Their sum can be deduced from the three other values.
1298   size_t _overhead_words;
1299   size_t _capacity_words;
1300   size_t _used_words;
1301   uintx _num_chunks_by_type[NumberOfInUseLists];
1302 
1303   // Free lists of blocks are per SpaceManager since they
1304   // are assumed to be in chunks in use by the SpaceManager
1305   // and all chunks in use by a SpaceManager are freed when
1306   // the class loader using the SpaceManager is collected.
1307   BlockFreelist* _block_freelists;
1308 
1309  private:
1310   // Accessors
1311   Metachunk* chunk_list() const { return _chunk_list; }
1312 
1313   BlockFreelist* block_freelists() const { return _block_freelists; }
1314 
1315   Metaspace::MetadataType mdtype() { return _mdtype; }
1316 
1317   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1318   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1319 
1320   Metachunk* current_chunk() const { return _current_chunk; }
1321   void set_current_chunk(Metachunk* v) {
1322     _current_chunk = v;
1323   }
1324 
1325   Metachunk* find_current_chunk(size_t word_size);
1326 
1327   // Add chunk to the list of chunks in use
1328   void add_chunk(Metachunk* v, bool make_current);
1329   void retire_current_chunk();
1330 
1331   Mutex* lock() const { return _lock; }
1332 
1333   // Adds to the given statistic object. Expects to be locked with lock().
1334   void add_to_statistics_locked(SpaceManagerStatistics* out) const;
1335 
1336   // Verify internal counters against the current state. Expects to be locked with lock().
1337   DEBUG_ONLY(void verify_metrics_locked() const;)
1338 
1339  public:
1340   SpaceManager(Metaspace::MetadataType mdtype,
1341                Metaspace::MetaspaceType space_type,
1342                Mutex* lock);
1343   ~SpaceManager();
1344 
1345   enum ChunkMultiples {
1346     MediumChunkMultiple = 4
1347   };
1348 
1349   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1350   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1351   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1352 
1353   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1354 
1355   // Accessors
1356   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1357 
1358   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1359   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1360   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1361 
1362   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1363 
1364   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
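  // Illustrative note (the sizes below are assumptions; the authoritative values are the
  // SpecializedChunk/SmallChunk/MediumChunk constants): assuming SpecializedChunk == 128,
  // SmallChunk == 512 and MediumChunk == 8 * K words for the non-class space,
  // medium_chunk_bunch() above suggests committing 4 * 8K == 32K words at a time once a
  // SpaceManager has graduated to medium chunks.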
1365 
1366   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1367 
1368   size_t capacity_words() const     { return _capacity_words; }
1369   size_t used_words() const         { return _used_words; }
1370   size_t overhead_words() const     { return _overhead_words; }
1371 
1372   // Adjust local, global counters after a new chunk has been added.
1373   void account_for_new_chunk(const Metachunk* new_chunk);
1374 
1375   // Adjust local, global counters after space has been allocated from the current chunk.
1376   void account_for_allocation(size_t words);
1377 
1378   // Adjust global counters just before the SpaceManager dies, after all its chunks
1379   // have been returned to the freelist.
1380   void account_for_spacemanager_death();
1381 
1382   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1383   // or return the unadjusted size if the requested size is humongous.
1384   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1385   size_t adjust_initial_chunk_size(size_t requested) const;
1386 
  // Get the initial chunk size for this metaspace type.
1388   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1389 
1390   // Todo: remove this once we have counters by chunk type.
1391   uintx num_chunks_by_type(ChunkIndex chunk_type) const       { return _num_chunks_by_type[chunk_type]; }
1392 
1393   Metachunk* get_new_chunk(size_t chunk_word_size);
1394 
1395   // Block allocation and deallocation.
1396   // Allocates a block from the current chunk
1397   MetaWord* allocate(size_t word_size);
1398 
1399   // Helper for allocations
1400   MetaWord* allocate_work(size_t word_size);
1401 
1402   // Returns a block to the per manager freelist
1403   void deallocate(MetaWord* p, size_t word_size);
1404 
  // Based on the allocation size and a minimum chunk size, calculate
  // the chunk size to return (used when expanding the space for a chunk allocation).
1407   size_t calc_chunk_size(size_t allocation_word_size);
1408 
1409   // Called when an allocation from the current chunk fails.
1410   // Gets a new chunk (may require getting a new virtual space),
1411   // and allocates from that chunk.
1412   MetaWord* grow_and_allocate(size_t word_size);
1413 
1414   // Notify memory usage to MemoryService.
1415   void track_metaspace_memory_usage();
1416 
1417   // debugging support.
1418 
1419   void print_on(outputStream* st) const;
1420   void locked_print_chunks_in_use_on(outputStream* st) const;
1421 
1422   void verify();
1423   void verify_chunk_size(Metachunk* chunk);
1424 
  // This adjusts the given size to be at least the minimum allocation size in
  // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
1427   size_t get_allocation_word_size(size_t word_size) {
1428     size_t byte_size = word_size * BytesPerWord;
1429 
1430     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1431     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1432 
1433     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1434     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1435 
1436     return raw_word_size;
1437   }
1438 
1439   // Adds to the given statistic object.
1440   void add_to_statistics(SpaceManagerStatistics* out) const;
1441 
1442   // Verify internal counters against the current state.
1443   DEBUG_ONLY(void verify_metrics() const;)
1444 
1445 };
1446 
1447 uint const SpaceManager::_small_chunk_limit = 4;
1448 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1449 
1450 void VirtualSpaceNode::inc_container_count() {
1451   assert_lock_strong(MetaspaceExpand_lock);
1452   _container_count++;
1453 }
1454 
1455 void VirtualSpaceNode::dec_container_count() {
1456   assert_lock_strong(MetaspaceExpand_lock);
1457   _container_count--;
1458 }
1459 
1460 #ifdef ASSERT
1461 void VirtualSpaceNode::verify_container_count() {
1462   assert(_container_count == container_count_slow(),
1463          "Inconsistency in container_count _container_count " UINTX_FORMAT
1464          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1465 }
1466 #endif
1467 
1468 // BlockFreelist methods
1469 
1470 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1471 
1472 BlockFreelist::~BlockFreelist() {
1473   delete _dictionary;
1474   if (_small_blocks != NULL) {
1475     delete _small_blocks;
1476   }
1477 }
1478 
1479 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1480   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1481 
1482   Metablock* free_chunk = ::new (p) Metablock(word_size);
1483   if (word_size < SmallBlocks::small_block_max_size()) {
1484     small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
1488   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1489             SIZE_FORMAT, p2i(free_chunk), word_size);
1490 }
1491 
1492 MetaWord* BlockFreelist::get_block(size_t word_size) {
1493   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1494 
1495   // Try small_blocks first.
1496   if (word_size < SmallBlocks::small_block_max_size()) {
1497     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1498     // this space manager.
1499     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1500     if (new_block != NULL) {
1501       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1502               p2i(new_block), word_size);
1503       return new_block;
1504     }
1505   }
1506 
1507   if (word_size < BlockFreelist::min_dictionary_size()) {
1508     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1509     return NULL;
1510   }
1511 
1512   Metablock* free_block = dictionary()->get_chunk(word_size);
1513   if (free_block == NULL) {
1514     return NULL;
1515   }
1516 
1517   const size_t block_size = free_block->size();
1518   if (block_size > WasteMultiplier * word_size) {
1519     return_block((MetaWord*)free_block, block_size);
1520     return NULL;
1521   }
1522 
1523   MetaWord* new_block = (MetaWord*)free_block;
1524   assert(block_size >= word_size, "Incorrect size of block from freelist");
1525   const size_t unused = block_size - word_size;
1526   if (unused >= SmallBlocks::small_block_min_size()) {
1527     return_block(new_block + word_size, unused);
1528   }
1529 
1530   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1531             p2i(new_block), word_size);
1532   return new_block;
1533 }
1534 
1535 void BlockFreelist::print_on(outputStream* st) const {
1536   dictionary()->print_free_lists(st);
1537   if (_small_blocks != NULL) {
1538     _small_blocks->print_on(st);
1539   }
1540 }
1541 
1542 // VirtualSpaceNode methods
1543 
1544 VirtualSpaceNode::~VirtualSpaceNode() {
1545   _rs.release();
1546   if (_occupancy_map != NULL) {
1547     delete _occupancy_map;
1548   }
1549 #ifdef ASSERT
1550   size_t word_size = sizeof(*this) / BytesPerWord;
1551   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1552 #endif
1553 }
1554 
1555 size_t VirtualSpaceNode::used_words_in_vs() const {
1556   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1557 }
1558 
1559 // Space committed in the VirtualSpace
1560 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1561   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1562 }
1563 
1564 size_t VirtualSpaceNode::free_words_in_vs() const {
1565   return pointer_delta(end(), top(), sizeof(MetaWord));
1566 }
1567 
1568 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1569 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1570 
1571   assert(target_top > top(), "Sanity");
1572 
1573   // Padding chunks are added to the freelist.
1574   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1575 
1576   // shorthands
1577   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1578   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1579   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1580 
1581   while (top() < target_top) {
1582 
    // We could make this code more generic, but right now we only deal with two possible chunk sizes
    // for padding chunks, so it is not worth it.
    size_t padding_chunk_word_size = small_word_size;
    if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
1587       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1588       padding_chunk_word_size = spec_word_size;
1589     }
1590     MetaWord* here = top();
1591     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1592     inc_top(padding_chunk_word_size);
1593 
1594     // Create new padding chunk.
1595     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1596     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1597 
1598     Metachunk* const padding_chunk =
1599       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1600     assert(padding_chunk == (Metachunk*)here, "Sanity");
1601     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1602     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1603                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1604                                        (is_class() ? "class space " : "metaspace"),
1605                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1606 
1607     // Mark chunk start in occupancy map.
1608     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1609 
    // Chunks are born as in-use (see Metachunk ctor). So, before returning
1611     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1612     // will assert that).
1613     do_update_in_use_info_for_chunk(padding_chunk, true);
1614 
1615     // Return Chunk to freelist.
1616     inc_container_count();
1617     chunk_manager->return_single_chunk(padding_chunk);
1618     // Please note: at this point, ChunkManager::return_single_chunk()
1619     // may already have merged the padding chunk with neighboring chunks, so
1620     // it may have vanished at this point. Do not reference the padding
1621     // chunk beyond this point.
1622   }
1623 
1624   assert(top() == target_top, "Sanity");
1625 
1626 } // allocate_padding_chunks_until_top_is_at()
1627 
1628 // Allocates the chunk from the virtual space only.
1629 // This interface is also used internally for debugging.  Not all
1630 // chunks removed here are necessarily used for allocation.
1631 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1632   // Non-humongous chunks are to be allocated aligned to their chunk
1633   // size. So, start addresses of medium chunks are aligned to medium
1634   // chunk size, those of small chunks to small chunk size and so
1635   // forth. This facilitates merging of free chunks and reduces
1636   // fragmentation. Chunk sizes are spec < small < medium, with each
1637   // larger chunk size being a multiple of the next smaller chunk
1638   // size.
  // Because of this alignment, we may need to create a number of padding
1640   // chunks. These chunks are created and added to the freelist.
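  //
  // Illustrative example (chunk sizes are assumptions): with spec == 128, small == 512
  // and medium == 8K words, and top() currently 128 words past a small-chunk boundary,
  // a request for a small chunk first creates three 128-word (spec) padding chunks to
  // bring top() up to the next 512-word boundary, and only then carves out the
  // requested 512-word chunk.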
1641 
1642   // The chunk manager to which we will give our padding chunks.
1643   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1644 
1645   // shorthands
1646   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1647   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1648   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1649 
1650   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1651          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1652 
1653   // Chunk alignment (in bytes) == chunk size unless humongous.
1654   // Humongous chunks are aligned to the smallest chunk size (spec).
1655   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1656                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1657 
1658   // Do we have enough space to create the requested chunk plus
1659   // any padding chunks needed?
1660   MetaWord* const next_aligned =
1661     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1662   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1663     return NULL;
1664   }
1665 
1666   // Before allocating the requested chunk, allocate padding chunks if necessary.
1667   // We only need to do this for small or medium chunks: specialized chunks are the
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1669   // (implicitly, also aligned to smallest chunk size).
1670   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1671     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1672         (is_class() ? "class space " : "metaspace"),
1673         top(), next_aligned);
1674     allocate_padding_chunks_until_top_is_at(next_aligned);
1675     // Now, top should be aligned correctly.
1676     assert_is_aligned(top(), required_chunk_alignment);
1677   }
1678 
1679   // Now, top should be aligned correctly.
1680   assert_is_aligned(top(), required_chunk_alignment);
1681 
1682   // Bottom of the new chunk
1683   MetaWord* chunk_limit = top();
1684   assert(chunk_limit != NULL, "Not safe to call this method");
1685 
1686   // The virtual spaces are always expanded by the
1687   // commit granularity to enforce the following condition.
1688   // Without this the is_available check will not work correctly.
1689   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1690       "The committed memory doesn't match the expanded memory.");
1691 
1692   if (!is_available(chunk_word_size)) {
1693     LogTarget(Debug, gc, metaspace, freelist) lt;
1694     if (lt.is_enabled()) {
1695       LogStream ls(lt);
1696       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1697       // Dump some information about the virtual space that is nearly full
1698       print_on(&ls);
1699     }
1700     return NULL;
1701   }
1702 
  // Take the space (bump top on the current virtual space).
1704   inc_top(chunk_word_size);
1705 
1706   // Initialize the chunk
1707   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1708   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1709   assert(result == (Metachunk*)chunk_limit, "Sanity");
1710   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1711   do_update_in_use_info_for_chunk(result, true);
1712 
1713   inc_container_count();
1714 
1715   if (VerifyMetaspace) {
1716     DEBUG_ONLY(chunk_manager->locked_verify());
1717     DEBUG_ONLY(this->verify());
1718   }
1719 
1720   DEBUG_ONLY(do_verify_chunk(result));
1721 
1722   result->inc_use_count();
1723 
1724   return result;
1725 }
1726 
1727 
1728 // Expand the virtual space (commit more of the reserved space)
1729 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1730   size_t min_bytes = min_words * BytesPerWord;
1731   size_t preferred_bytes = preferred_words * BytesPerWord;
1732 
1733   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1734 
1735   if (uncommitted < min_bytes) {
1736     return false;
1737   }
1738 
1739   size_t commit = MIN2(preferred_bytes, uncommitted);
1740   bool result = virtual_space()->expand_by(commit, false);
1741 
1742   if (result) {
1743     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1744               (is_class() ? "class" : "non-class"), commit);
1745     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
1746   } else {
1747     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1748               (is_class() ? "class" : "non-class"), commit);
1749   }
1750 
1751   assert(result, "Failed to commit memory");
1752 
1753   return result;
1754 }
1755 
1756 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1757   assert_lock_strong(MetaspaceExpand_lock);
1758   Metachunk* result = take_from_committed(chunk_word_size);
1759   return result;
1760 }
1761 
1762 bool VirtualSpaceNode::initialize() {
1763 
1764   if (!_rs.is_reserved()) {
1765     return false;
1766   }
1767 
  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
1771   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1772   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1773 
1774   // ReservedSpaces marked as special will have the entire memory
1775   // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
1777   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1778 
1779   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1780                                             Metaspace::commit_alignment());
1781   if (result) {
1782     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1783         "Checking that the pre-committed memory was registered by the VirtualSpace");
1784 
1785     set_top((MetaWord*)virtual_space()->low());
1786     set_reserved(MemRegion((HeapWord*)_rs.base(),
1787                  (HeapWord*)(_rs.base() + _rs.size())));
1788 
1789     assert(reserved()->start() == (HeapWord*) _rs.base(),
1790            "Reserved start was not set properly " PTR_FORMAT
1791            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1792     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1793            "Reserved size was not set properly " SIZE_FORMAT
1794            " != " SIZE_FORMAT, reserved()->word_size(),
1795            _rs.size() / BytesPerWord);
1796   }
1797 
1798   // Initialize Occupancy Map.
1799   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1800   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1801 
1802   return result;
1803 }
1804 
1805 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
1806   size_t used_words = used_words_in_vs();
1807   size_t commit_words = committed_words();
1808   size_t res_words = reserved_words();
1809   VirtualSpace* vs = virtual_space();
1810 
1811   st->print("node @" PTR_FORMAT ": ", p2i(this));
1812   st->print("reserved=");
1813   print_scaled_words(st, res_words, scale);
1814   st->print(", committed=");
1815   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
1816   st->print(", used=");
1817   print_scaled_words_and_percentage(st, used_words, res_words, scale);
1818   st->cr();
1819   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
1820            PTR_FORMAT ", " PTR_FORMAT ")",
1821            p2i(bottom()), p2i(top()), p2i(end()),
1822            p2i(vs->high_boundary()));
1823 }
1824 
1825 #ifdef ASSERT
1826 void VirtualSpaceNode::mangle() {
1827   size_t word_size = capacity_words_in_vs();
1828   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1829 }
1830 #endif // ASSERT
1831 
1832 // VirtualSpaceList methods
1833 // Space allocated from the VirtualSpace
1834 
1835 VirtualSpaceList::~VirtualSpaceList() {
1836   VirtualSpaceListIterator iter(virtual_space_list());
1837   while (iter.repeat()) {
1838     VirtualSpaceNode* vsl = iter.get_next();
1839     delete vsl;
1840   }
1841 }
1842 
1843 void VirtualSpaceList::inc_reserved_words(size_t v) {
1844   assert_lock_strong(MetaspaceExpand_lock);
1845   _reserved_words = _reserved_words + v;
1846 }
1847 void VirtualSpaceList::dec_reserved_words(size_t v) {
1848   assert_lock_strong(MetaspaceExpand_lock);
1849   _reserved_words = _reserved_words - v;
1850 }
1851 
1852 #define assert_committed_below_limit()                        \
1853   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1854          "Too much committed memory. Committed: " SIZE_FORMAT \
1855          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1856           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1857 
1858 void VirtualSpaceList::inc_committed_words(size_t v) {
1859   assert_lock_strong(MetaspaceExpand_lock);
1860   _committed_words = _committed_words + v;
1861 
1862   assert_committed_below_limit();
1863 }
1864 void VirtualSpaceList::dec_committed_words(size_t v) {
1865   assert_lock_strong(MetaspaceExpand_lock);
1866   _committed_words = _committed_words - v;
1867 
1868   assert_committed_below_limit();
1869 }
1870 
1871 void VirtualSpaceList::inc_virtual_space_count() {
1872   assert_lock_strong(MetaspaceExpand_lock);
1873   _virtual_space_count++;
1874 }
1875 void VirtualSpaceList::dec_virtual_space_count() {
1876   assert_lock_strong(MetaspaceExpand_lock);
1877   _virtual_space_count--;
1878 }
1879 
1880 void ChunkManager::remove_chunk(Metachunk* chunk) {
1881   size_t word_size = chunk->word_size();
1882   ChunkIndex index = list_index(word_size);
1883   if (index != HumongousIndex) {
1884     free_chunks(index)->remove_chunk(chunk);
1885   } else {
1886     humongous_dictionary()->remove_chunk(chunk);
1887   }
1888 
1889   // Chunk has been removed from the chunks free list, update counters.
1890   account_for_removed_chunk(chunk);
1891 }
1892 
1893 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1894   assert_lock_strong(MetaspaceExpand_lock);
1895   assert(chunk != NULL, "invalid chunk pointer");
1896   // Check for valid merge combinations.
1897   assert((chunk->get_chunk_type() == SpecializedIndex &&
1898           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1899          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1900         "Invalid chunk merge combination.");
1901 
1902   const size_t target_chunk_word_size =
1903     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1904 
1905   // [ prospective merge region )
1906   MetaWord* const p_merge_region_start =
1907     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1908   MetaWord* const p_merge_region_end =
1909     p_merge_region_start + target_chunk_word_size;
1910 
1911   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1912   VirtualSpaceNode* const vsn = chunk->container();
1913   OccupancyMap* const ocmap = vsn->occupancy_map();
1914 
1915   // The prospective chunk merge range must be completely contained by the
1916   // committed range of the virtual space node.
1917   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1918     return false;
1919   }
1920 
1921   // Only attempt to merge this range if at its start a chunk starts and at its end
1922   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1923   // of that range, we cannot merge.
1924   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1925     return false;
1926   }
1927   if (p_merge_region_end < vsn->top() &&
1928       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1929     return false;
1930   }
1931 
1932   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1933   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1934     return false;
1935   }
1936 
1937   // Success! Remove all chunks in this region...
1938   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1939     (is_class() ? "class space" : "metaspace"),
1940     p_merge_region_start, p_merge_region_end);
1941 
1942   const int num_chunks_removed =
1943     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1944 
1945   // ... and create a single new bigger chunk.
1946   Metachunk* const p_new_chunk =
1947       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1948   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1949   p_new_chunk->set_origin(origin_merge);
1950 
1951   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1952     (is_class() ? "class space" : "metaspace"),
1953     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1954 
1955   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1956   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1957   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1958 
1959   // Mark chunk as free. Note: it is not necessary to update the occupancy
1960   // map in-use map, because the old chunks were also free, so nothing
1961   // should have changed.
1962   p_new_chunk->set_is_tagged_free(true);
1963 
1964   // Add new chunk to its freelist.
1965   ChunkList* const list = free_chunks(target_chunk_type);
1966   list->return_chunk_at_head(p_new_chunk);
1967 
  // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
  // should not have changed, because the size of the space should be the same).
  _free_chunks_count -= num_chunks_removed;
  _free_chunks_count++;
1972 
1973   // VirtualSpaceNode::container_count does not have to be modified:
1974   // it means "number of active (non-free) chunks", so merging free chunks
1975   // should not affect that count.
1976 
1977   // At the end of a chunk merge, run verification tests.
1978   if (VerifyMetaspace) {
1979     DEBUG_ONLY(this->locked_verify());
1980     DEBUG_ONLY(vsn->verify());
1981   }
1982 
1983   return true;
1984 }
1985 
1986 // Remove all chunks in the given area - the chunks are supposed to be free -
1987 // from their corresponding freelists. Mark them as invalid.
// - This does not correct the occupancy map.
// - This does not adjust the counters in ChunkManager.
// - This does not adjust the container count in the containing VirtualSpaceNode.
1991 // Returns number of chunks removed.
1992 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1993   assert(p != NULL && word_size > 0, "Invalid range.");
1994   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1995   assert_is_aligned(word_size, smallest_chunk_size);
1996 
1997   Metachunk* const start = (Metachunk*) p;
1998   const Metachunk* const end = (Metachunk*)(p + word_size);
1999   Metachunk* cur = start;
2000   int num_removed = 0;
2001   while (cur < end) {
2002     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
2003     DEBUG_ONLY(do_verify_chunk(cur));
2004     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
2005     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
2006     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
2007       (is_class() ? "class space" : "metaspace"),
2008       cur, cur->word_size() * sizeof(MetaWord));
2009     cur->remove_sentinel();
2010     // Note: cannot call ChunkManager::remove_chunk, because that
2011     // modifies the counters in ChunkManager, which we do not want. So
2012     // we call remove_chunk on the freelist directly (see also the
2013     // splitting function which does the same).
2014     ChunkList* const list = free_chunks(list_index(cur->word_size()));
2015     list->remove_chunk(cur);
2016     num_removed ++;
2017     cur = next;
2018   }
2019   return num_removed;
2020 }
2021 
2022 // Walk the list of VirtualSpaceNodes and delete
2023 // nodes with a 0 container_count.  Remove Metachunks in
2024 // the node from their respective freelists.
2025 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2026   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2027   assert_lock_strong(MetaspaceExpand_lock);
2028   // Don't use a VirtualSpaceListIterator because this
2029   // list is being changed and a straightforward use of an iterator is not safe.
2030   VirtualSpaceNode* purged_vsl = NULL;
2031   VirtualSpaceNode* prev_vsl = virtual_space_list();
2032   VirtualSpaceNode* next_vsl = prev_vsl;
2033   while (next_vsl != NULL) {
2034     VirtualSpaceNode* vsl = next_vsl;
2035     DEBUG_ONLY(vsl->verify_container_count();)
2036     next_vsl = vsl->next();
2037     // Don't free the current virtual space since it will likely
2038     // be needed soon.
2039     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2040       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2041                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2042       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
2043       // Unlink it from the list
2044       if (prev_vsl == vsl) {
2045         // This is the case of the current node being the first node.
2046         assert(vsl == virtual_space_list(), "Expected to be the first node");
2047         set_virtual_space_list(vsl->next());
2048       } else {
2049         prev_vsl->set_next(vsl->next());
2050       }
2051 
2052       vsl->purge(chunk_manager);
2053       dec_reserved_words(vsl->reserved_words());
2054       dec_committed_words(vsl->committed_words());
2055       dec_virtual_space_count();
2056       purged_vsl = vsl;
2057       delete vsl;
2058     } else {
2059       prev_vsl = vsl;
2060     }
2061   }
2062 #ifdef ASSERT
2063   if (purged_vsl != NULL) {
2064     // List should be stable enough to use an iterator here.
2065     VirtualSpaceListIterator iter(virtual_space_list());
2066     while (iter.repeat()) {
2067       VirtualSpaceNode* vsl = iter.get_next();
2068       assert(vsl != purged_vsl, "Purge of vsl failed");
2069     }
2070   }
2071 #endif
2072 }
2073 
2074 
2075 // This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time during a safepoint.
2078 bool VirtualSpaceList::contains(const void* ptr) {
2079   // List should be stable enough to use an iterator here because removing virtual
2080   // space nodes is only allowed at a safepoint.
2081   VirtualSpaceListIterator iter(virtual_space_list());
2082   while (iter.repeat()) {
2083     VirtualSpaceNode* vsn = iter.get_next();
2084     if (vsn->contains(ptr)) {
2085       return true;
2086     }
2087   }
2088   return false;
2089 }
2090 
2091 void VirtualSpaceList::retire_current_virtual_space() {
2092   assert_lock_strong(MetaspaceExpand_lock);
2093 
2094   VirtualSpaceNode* vsn = current_virtual_space();
2095 
2096   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2097                                   Metaspace::chunk_manager_metadata();
2098 
2099   vsn->retire(cm);
2100 }
2101 
2102 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2103   DEBUG_ONLY(verify_container_count();)
2104   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2105   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2106     ChunkIndex index = (ChunkIndex)i;
2107     size_t chunk_size = chunk_manager->size_by_index(index);
2108 
2109     while (free_words_in_vs() >= chunk_size) {
2110       Metachunk* chunk = get_chunk_vs(chunk_size);
2111       // Chunk will be allocated aligned, so allocation may require
2112       // additional padding chunks. That may cause above allocation to
2113       // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
2115       // size should be a multiple of the smallest chunk size, we
2116       // should always be able to fill the VirtualSpace completely.
2117       if (chunk == NULL) {
2118         break;
2119       }
2120       chunk_manager->return_single_chunk(chunk);
2121     }
2122     DEBUG_ONLY(verify_container_count();)
2123   }
2124   assert(free_words_in_vs() == 0, "should be empty now");
2125 }
2126 
2127 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2128                                    _is_class(false),
2129                                    _virtual_space_list(NULL),
2130                                    _current_virtual_space(NULL),
2131                                    _reserved_words(0),
2132                                    _committed_words(0),
2133                                    _virtual_space_count(0) {
2134   MutexLockerEx cl(MetaspaceExpand_lock,
2135                    Mutex::_no_safepoint_check_flag);
2136   create_new_virtual_space(word_size);
2137 }
2138 
2139 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2140                                    _is_class(true),
2141                                    _virtual_space_list(NULL),
2142                                    _current_virtual_space(NULL),
2143                                    _reserved_words(0),
2144                                    _committed_words(0),
2145                                    _virtual_space_count(0) {
2146   MutexLockerEx cl(MetaspaceExpand_lock,
2147                    Mutex::_no_safepoint_check_flag);
2148   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2149   bool succeeded = class_entry->initialize();
2150   if (succeeded) {
2151     link_vs(class_entry);
2152   }
2153 }
2154 
2155 size_t VirtualSpaceList::free_bytes() {
2156   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2157 }
2158 
2159 // Allocate another meta virtual space and add it to the list.
2160 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2161   assert_lock_strong(MetaspaceExpand_lock);
2162 
2163   if (is_class()) {
2164     assert(false, "We currently don't support more than one VirtualSpace for"
2165                   " the compressed class space. The initialization of the"
2166                   " CCS uses another code path and should not hit this path.");
2167     return false;
2168   }
2169 
2170   if (vs_word_size == 0) {
2171     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2172     return false;
2173   }
2174 
2175   // Reserve the space
2176   size_t vs_byte_size = vs_word_size * BytesPerWord;
2177   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2178 
2179   // Allocate the meta virtual space and initialize it.
2180   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2181   if (!new_entry->initialize()) {
2182     delete new_entry;
2183     return false;
2184   } else {
2185     assert(new_entry->reserved_words() == vs_word_size,
2186         "Reserved memory size differs from requested memory size");
2187     // ensure lock-free iteration sees fully initialized node
2188     OrderAccess::storestore();
2189     link_vs(new_entry);
2190     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
2191     return true;
2192   }
2193 }
2194 
2195 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2196   if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
2198   } else {
2199     current_virtual_space()->set_next(new_entry);
2200   }
2201   set_current_virtual_space(new_entry);
2202   inc_reserved_words(new_entry->reserved_words());
2203   inc_committed_words(new_entry->committed_words());
2204   inc_virtual_space_count();
2205 #ifdef ASSERT
2206   new_entry->mangle();
2207 #endif
2208   LogTarget(Trace, gc, metaspace) lt;
2209   if (lt.is_enabled()) {
2210     LogStream ls(lt);
2211     VirtualSpaceNode* vsl = current_virtual_space();
2212     ResourceMark rm;
2213     vsl->print_on(&ls);
2214   }
2215 }
2216 
2217 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2218                                       size_t min_words,
2219                                       size_t preferred_words) {
2220   size_t before = node->committed_words();
2221 
2222   bool result = node->expand_by(min_words, preferred_words);
2223 
2224   size_t after = node->committed_words();
2225 
2226   // after and before can be the same if the memory was pre-committed.
2227   assert(after >= before, "Inconsistency");
2228   inc_committed_words(after - before);
2229 
2230   return result;
2231 }
2232 
2233 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2234   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2235   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2236   assert(min_words <= preferred_words, "Invalid arguments");
2237 
2238   const char* const class_or_not = (is_class() ? "class" : "non-class");
2239 
2240   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2241     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2242               class_or_not);
    return false;
2244   }
2245 
2246   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2247   if (allowed_expansion_words < min_words) {
2248     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2249               class_or_not);
2250     return false;
2251   }
2252 
2253   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2254 
  // Commit more memory from the current virtual space.
2256   bool vs_expanded = expand_node_by(current_virtual_space(),
2257                                     min_words,
2258                                     max_expansion_words);
2259   if (vs_expanded) {
2260      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2261                class_or_not);
2262      return true;
2263   }
2264   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2265             class_or_not);
2266   retire_current_virtual_space();
2267 
2268   // Get another virtual space.
2269   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2270   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2271 
2272   if (create_new_virtual_space(grow_vs_words)) {
2273     if (current_virtual_space()->is_pre_committed()) {
2274       // The memory was pre-committed, so we are done here.
2275       assert(min_words <= current_virtual_space()->committed_words(),
2276           "The new VirtualSpace was pre-committed, so it"
2277           "should be large enough to fit the alloc request.");
2278       return true;
2279     }
2280 
2281     return expand_node_by(current_virtual_space(),
2282                           min_words,
2283                           max_expansion_words);
2284   }
2285 
2286   return false;
2287 }
2288 
// Given a chunk size, calculate the largest possible padding space which
// could be required when allocating a chunk of that size.
2291 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2292   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2293   if (chunk_type != HumongousIndex) {
2294     // Normal, non-humongous chunks are allocated at chunk size
2295     // boundaries, so the largest padding space required would be that
2296     // minus the smallest chunk size.
2297     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2298     return chunk_word_size - smallest_chunk_size;
2299   } else {
2300     // Humongous chunks are allocated at smallest-chunksize
2301     // boundaries, so there is no padding required.
2302     return 0;
2303   }
2304 }
2305 
2306 
2307 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2308 
2309   // Allocate a chunk out of the current virtual space.
2310   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2311 
2312   if (next != NULL) {
2313     return next;
2314   }
2315 
2316   // The expand amount is currently only determined by the requested sizes
2317   // and not how much committed memory is left in the current virtual space.
2318 
2319   // We must have enough space for the requested size and any
  // additional required padding chunks.
2321   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2322 
2323   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2324   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2325   if (min_word_size >= preferred_word_size) {
2326     // Can happen when humongous chunks are allocated.
2327     preferred_word_size = min_word_size;
2328   }
2329 
2330   bool expanded = expand_by(min_word_size, preferred_word_size);
2331   if (expanded) {
2332     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2333     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2334   }
2335 
  return next;
2337 }
2338 
2339 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
2340   st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
2341       _virtual_space_count, p2i(_current_virtual_space));
2342   VirtualSpaceListIterator iter(virtual_space_list());
2343   while (iter.repeat()) {
2344     st->cr();
2345     VirtualSpaceNode* node = iter.get_next();
2346     node->print_on(st, scale);
2347   }
2348 }
2349 
2350 void VirtualSpaceList::print_map(outputStream* st) const {
2351   VirtualSpaceNode* list = virtual_space_list();
2352   VirtualSpaceListIterator iter(list);
2353   unsigned i = 0;
2354   while (iter.repeat()) {
2355     st->print_cr("Node %u:", i);
2356     VirtualSpaceNode* node = iter.get_next();
2357     node->print_map(st, this->is_class());
2358     i ++;
2359   }
2360 }
2361 
2362 // MetaspaceGC methods
2363 
// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation, after the GC, the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtual space will be
// allocated for the metadata.  With the perm gen, increases to the perm gen
// size were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
2371 //
2372 // After the GC the compute_new_size() for MetaspaceGC is called to
2373 // resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, analogous
// to the ratios some GCs use to resize the Java heap.  New flags can be implemented
2376 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2377 // free space is desirable in the metaspace capacity to decide how much
2378 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2379 // free space is desirable in the metaspace capacity before decreasing
2380 // the HWM.
2381 
2382 // Calculate the amount to increase the high water mark (HWM).
2383 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2384 // another expansion is not requested too soon.  If that is not
2385 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2386 // If that is still not enough, expand by the size of the allocation
2387 // plus some.
2388 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2389   size_t min_delta = MinMetaspaceExpansion;
2390   size_t max_delta = MaxMetaspaceExpansion;
2391   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2392 
2393   if (delta <= min_delta) {
2394     delta = min_delta;
2395   } else if (delta <= max_delta) {
2396     // Don't want to hit the high water mark on the next
2397     // allocation so make the delta greater than just enough
2398     // for this allocation.
2399     delta = max_delta;
2400   } else {
2401     // This allocation is large but the next ones are probably not
2402     // so increase by the minimum.
2403     delta = delta + min_delta;
2404   }
2405 
2406   assert_is_aligned(delta, Metaspace::commit_alignment());
2407 
2408   return delta;
2409 }
2410 
2411 size_t MetaspaceGC::capacity_until_GC() {
2412   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2413   assert(value >= MetaspaceSize, "Not initialized properly?");
2414   return value;
2415 }
2416 
2417 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2418   assert_is_aligned(v, Metaspace::commit_alignment());
2419 
2420   size_t old_value = _capacity_until_GC;
2421   size_t new_value = old_value + v;
2422 
2423   if (new_value < old_value) {
2424     // The addition wrapped around, set new_value to aligned max value.
2425     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2426   }
2427 
2428   size_t res = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_value);
2429 
2430   if (old_value != res) {
2431     return false;
2432   }
2433 
2434   if (new_cap_until_GC != NULL) {
2435     *new_cap_until_GC = new_value;
2436   }
2437   if (old_cap_until_GC != NULL) {
2438     *old_cap_until_GC = old_value;
2439   }
2440   return true;
2441 }
2442 
2443 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2444   assert_is_aligned(v, Metaspace::commit_alignment());
2445 
2446   return Atomic::sub(v, &_capacity_until_GC);
2447 }
2448 
2449 void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2451   // we can't do a GC during initialization.
2452   _capacity_until_GC = MaxMetaspaceSize;
2453 }
2454 
2455 void MetaspaceGC::post_initialize() {
2456   // Reset the high-water mark once the VM initialization is done.
2457   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2458 }
2459 
2460 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2461   // Check if the compressed class space is full.
2462   if (is_class && Metaspace::using_class_space()) {
2463     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2464     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2465       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2466                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2467       return false;
2468     }
2469   }
2470 
2471   // Check if the user has imposed a limit on the metaspace memory.
2472   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2473   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2474     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2475               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2476     return false;
2477   }
2478 
2479   return true;
2480 }
2481 
2482 size_t MetaspaceGC::allowed_expansion() {
2483   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2484   size_t capacity_until_gc = capacity_until_GC();
2485 
2486   assert(capacity_until_gc >= committed_bytes,
2487          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2488          capacity_until_gc, committed_bytes);
2489 
2490   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2491   size_t left_until_GC = capacity_until_gc - committed_bytes;
2492   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2493   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2494             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2495             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2496 
2497   return left_to_commit / BytesPerWord;
2498 }
2499 
2500 void MetaspaceGC::compute_new_size() {
2501   assert(_shrink_factor <= 100, "invalid shrink factor");
2502   uint current_shrink_factor = _shrink_factor;
2503   _shrink_factor = 0;
2504 
2505   // Using committed_bytes() for used_after_gc is an overestimation, since the
2506   // chunk free lists are included in committed_bytes() and the memory in an
2507   // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
2509   // not be available for future allocations and the memory is therefore "in use".
2510   // Including the chunk free lists in the definition of "in use" is therefore
2511   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2512   // shrink below committed_bytes() and this has caused serious bugs in the past.
2513   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2514   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2515 
2516   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2517   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2518 
2519   const double min_tmp = used_after_gc / maximum_used_percentage;
2520   size_t minimum_desired_capacity =
2521     (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial metaspace size (MetaspaceSize).
2523   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2524                                   MetaspaceSize);
2525 
2526   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2527   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2528                            minimum_free_percentage, maximum_used_percentage);
2529   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2530 
2531 
2532   size_t shrink_bytes = 0;
2533   if (capacity_until_GC < minimum_desired_capacity) {
2534     // If we have less capacity below the metaspace HWM, then
2535     // increment the HWM.
2536     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2537     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2538     // Don't expand unless it's significant
2539     if (expand_bytes >= MinMetaspaceExpansion) {
2540       size_t new_capacity_until_GC = 0;
2541       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2542       assert(succeeded, "Should always succesfully increment HWM when at safepoint");
2543 
2544       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2545                                                new_capacity_until_GC,
2546                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2547       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2548                                minimum_desired_capacity / (double) K,
2549                                expand_bytes / (double) K,
2550                                MinMetaspaceExpansion / (double) K,
2551                                new_capacity_until_GC / (double) K);
2552     }
2553     return;
2554   }
2555 
2556   // No expansion, now see if we want to shrink
2557   // We would never want to shrink more than this
2558   assert(capacity_until_GC >= minimum_desired_capacity,
2559          SIZE_FORMAT " >= " SIZE_FORMAT,
2560          capacity_until_GC, minimum_desired_capacity);
2561   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2562 
2563   // Should shrinking be considered?
2564   if (MaxMetaspaceFreeRatio < 100) {
2565     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2566     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2567     const double max_tmp = used_after_gc / minimum_used_percentage;
2568     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2569     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2570                                     MetaspaceSize);
2571     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2572                              maximum_free_percentage, minimum_used_percentage);
2573     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2574                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2575 
2576     assert(minimum_desired_capacity <= maximum_desired_capacity,
2577            "sanity check");
2578 
2579     if (capacity_until_GC > maximum_desired_capacity) {
2580       // Capacity too large, compute shrinking size
2581       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace again for the next phase.  So we
2585       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2586       // on the third call, and 100% by the fourth call.  But if we recompute
2587       // size without shrinking, it goes back to 0%.
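      // For example, a computed excess of 10 MB would shrink capacity_until_GC by
      // 0 MB, 1 MB, 4 MB and then 10 MB over four successive shrinking calls (each
      // amount still subject to the alignment and MinMetaspaceExpansion checks below).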
2588       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2589 
2590       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2591 
2592       assert(shrink_bytes <= max_shrink_bytes,
2593              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2594              shrink_bytes, max_shrink_bytes);
2595       if (current_shrink_factor == 0) {
2596         _shrink_factor = 10;
2597       } else {
2598         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2599       }
2600       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2601                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2602       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2603                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2604     }
2605   }
2606 
2607   // Don't shrink unless it's significant
2608   if (shrink_bytes >= MinMetaspaceExpansion &&
2609       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2610     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2611     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2612                                              new_capacity_until_GC,
2613                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2614   }
2615 }
2616 
2617 // Metadebug methods
2618 
2619 void Metadebug::init_allocation_fail_alot_count() {
2620   if (MetadataAllocationFailALot) {
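    // os::random()/(max_jint+1.0) yields a pseudo-random value in [0, 1), so
    // the computed count is roughly uniform in [1, MetadataAllocationFailALotInterval].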
2621     _allocation_fail_alot_count =
2622       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2623   }
2624 }
2625 
2626 #ifdef ASSERT
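// Returns true if this metadata allocation should be made to fail on purpose.
// After a randomized number of successful allocations (see
// init_allocation_fail_alot_count()), one allocation fails and the counter is
// re-armed. Only active once VM initialization has completed.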
2627 bool Metadebug::test_metadata_failure() {
2628   if (MetadataAllocationFailALot &&
2629       Threads::is_vm_complete()) {
2630     if (_allocation_fail_alot_count > 0) {
2631       _allocation_fail_alot_count--;
2632     } else {
2633       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2634       init_allocation_fail_alot_count();
2635       return true;
2636     }
2637   }
2638   return false;
2639 }
2640 #endif
2641 
2642 // ChunkManager methods
2643 size_t ChunkManager::free_chunks_total_words() {
2644   return _free_chunks_total;
2645 }
2646 
2647 size_t ChunkManager::free_chunks_total_bytes() {
2648   return free_chunks_total_words() * BytesPerWord;
2649 }
2650 
2651 // Update internal accounting after a chunk was added
2652 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2653   assert_lock_strong(MetaspaceExpand_lock);
2654   _free_chunks_count ++;
2655   _free_chunks_total += c->word_size();
2656 }
2657 
2658 // Update internal accounting after a chunk was removed
2659 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2660   assert_lock_strong(MetaspaceExpand_lock);
2661   assert(_free_chunks_count >= 1,
2662     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
  assert(_free_chunks_total >= c->word_size(),
    "ChunkManager::_free_chunks_total: about to go negative "
2665      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2666   _free_chunks_count --;
2667   _free_chunks_total -= c->word_size();
2668 }
2669 
2670 size_t ChunkManager::free_chunks_count() {
2671 #ifdef ASSERT
2672   if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
2673     MutexLockerEx cl(MetaspaceExpand_lock,
2674                      Mutex::_no_safepoint_check_flag);
2675     // This lock is only needed in debug because the verification
2676     // of the _free_chunks_totals walks the list of free chunks
2677     slow_locked_verify_free_chunks_count();
2678   }
2679 #endif
2680   return _free_chunks_count;
2681 }
2682 
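// Map a chunk word size to the index of the free list it belongs to
// (specialized, small, medium or humongous).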
2683 ChunkIndex ChunkManager::list_index(size_t size) {
2684   return get_chunk_type_by_size(size, is_class());
2685 }
2686 
2687 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2688   index_bounds_check(index);
2689   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2690   return get_size_for_nonhumongous_chunktype(index, is_class());
2691 }
2692 
2693 void ChunkManager::locked_verify_free_chunks_total() {
2694   assert_lock_strong(MetaspaceExpand_lock);
2695   assert(sum_free_chunks() == _free_chunks_total,
2696          "_free_chunks_total " SIZE_FORMAT " is not the"
2697          " same as sum " SIZE_FORMAT, _free_chunks_total,
2698          sum_free_chunks());
2699 }
2700 
2701 void ChunkManager::locked_verify_free_chunks_count() {
2702   assert_lock_strong(MetaspaceExpand_lock);
2703   assert(sum_free_chunks_count() == _free_chunks_count,
2704          "_free_chunks_count " SIZE_FORMAT " is not the"
2705          " same as sum " SIZE_FORMAT, _free_chunks_count,
2706          sum_free_chunks_count());
2707 }
2708 
2709 void ChunkManager::verify() {
2710   MutexLockerEx cl(MetaspaceExpand_lock,
2711                      Mutex::_no_safepoint_check_flag);
2712   locked_verify();
2713 }
2714 
2715 void ChunkManager::locked_verify() {
2716   locked_verify_free_chunks_count();
2717   locked_verify_free_chunks_total();
2718   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2719     ChunkList* list = free_chunks(i);
2720     if (list != NULL) {
2721       Metachunk* chunk = list->head();
2722       while (chunk) {
2723         DEBUG_ONLY(do_verify_chunk(chunk);)
2724         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2725         chunk = chunk->next();
2726       }
2727     }
2728   }
2729 }
2730 
2731 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2732   assert_lock_strong(MetaspaceExpand_lock);
2733   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2734                 _free_chunks_total, _free_chunks_count);
2735 }
2736 
2737 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2738   assert_lock_strong(MetaspaceExpand_lock);
2739   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2740                 sum_free_chunks(), sum_free_chunks_count());
2741 }
2742 
2743 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2744   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2745          "Bad index: %d", (int)index);
2746 
2747   return &_free_chunks[index];
2748 }
2749 
// These methods, which sum over the free chunk lists, are used by printing
// methods that run in product builds.
2752 size_t ChunkManager::sum_free_chunks() {
2753   assert_lock_strong(MetaspaceExpand_lock);
2754   size_t result = 0;
2755   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2756     ChunkList* list = free_chunks(i);
2757 
2758     if (list == NULL) {
2759       continue;
2760     }
2761 
2762     result = result + list->count() * list->size();
2763   }
2764   result = result + humongous_dictionary()->total_size();
2765   return result;
2766 }
2767 
2768 size_t ChunkManager::sum_free_chunks_count() {
2769   assert_lock_strong(MetaspaceExpand_lock);
2770   size_t count = 0;
2771   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2772     ChunkList* list = free_chunks(i);
2773     if (list == NULL) {
2774       continue;
2775     }
2776     count = count + list->count();
2777   }
2778   count = count + humongous_dictionary()->total_free_blocks();
2779   return count;
2780 }
2781 
2782 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2783   ChunkIndex index = list_index(word_size);
2784   assert(index < HumongousIndex, "No humongous list");
2785   return free_chunks(index);
2786 }
2787 
// Helper for chunk splitting: given a target chunk size and a larger free chunk,
// split the larger chunk into n smaller chunks, at least one of which is a chunk
// of the target size. The smaller chunks, including the target chunk, are returned
// to the freelist. A pointer to the target chunk is returned; the caller is
// expected to remove that chunk from the freelist right away.
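// For example (assuming the default non-class chunk sizes of 128, 512 and 8K
// words), splitting an 8K-word medium chunk to obtain a 128-word specialized
// chunk yields the target chunk at the region start, three more specialized
// chunks up to the first small-chunk boundary, and fifteen small chunks
// covering the rest of the region.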
2793 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2794   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2795 
2796   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2797   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2798 
2799   MetaWord* const region_start = (MetaWord*)larger_chunk;
2800   const size_t region_word_len = larger_chunk->word_size();
2801   MetaWord* const region_end = region_start + region_word_len;
2802   VirtualSpaceNode* const vsn = larger_chunk->container();
2803   OccupancyMap* const ocmap = vsn->occupancy_map();
2804 
2805   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2806   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2807   // at an address suitable to place the smaller target chunk.
2808   assert_is_aligned(region_start, target_chunk_word_size);
2809 
2810   // Remove old chunk.
2811   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2812   larger_chunk->remove_sentinel();
2813 
2814   // Prevent access to the old chunk from here on.
2815   larger_chunk = NULL;
2816   // ... and wipe it.
2817   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2818 
2819   // In its place create first the target chunk...
2820   MetaWord* p = region_start;
2821   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2822   assert(target_chunk == (Metachunk*)p, "Sanity");
2823   target_chunk->set_origin(origin_split);
2824 
2825   // Note: we do not need to mark its start in the occupancy map
2826   // because it coincides with the old chunk start.
2827 
2828   // Mark chunk as free and return to the freelist.
2829   do_update_in_use_info_for_chunk(target_chunk, false);
2830   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2831 
2832   // This chunk should now be valid and can be verified.
2833   DEBUG_ONLY(do_verify_chunk(target_chunk));
2834 
2835   // In the remaining space create the remainder chunks.
2836   p += target_chunk->word_size();
2837   assert(p < region_end, "Sanity");
2838 
2839   while (p < region_end) {
2840 
2841     // Find the largest chunk size which fits the alignment requirements at address p.
2842     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2843     size_t this_chunk_word_size = 0;
2844     for(;;) {
2845       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2846       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2847         break;
2848       } else {
2849         this_chunk_index = prev_chunk_index(this_chunk_index);
2850         assert(this_chunk_index >= target_chunk_index, "Sanity");
2851       }
2852     }
2853 
2854     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2855     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2856     assert(p + this_chunk_word_size <= region_end, "Sanity");
2857 
2858     // Create splitting chunk.
2859     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2860     assert(this_chunk == (Metachunk*)p, "Sanity");
2861     this_chunk->set_origin(origin_split);
2862     ocmap->set_chunk_starts_at_address(p, true);
2863     do_update_in_use_info_for_chunk(this_chunk, false);
2864 
2865     // This chunk should be valid and can be verified.
2866     DEBUG_ONLY(do_verify_chunk(this_chunk));
2867 
2868     // Return this chunk to freelist and correct counter.
2869     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2870     _free_chunks_count ++;
2871 
2872     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2873       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2874       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2875       p2i(region_start), p2i(region_end));
2876 
2877     p += this_chunk_word_size;
2878 
2879   }
2880 
2881   return target_chunk;
2882 }
2883 
2884 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2885   assert_lock_strong(MetaspaceExpand_lock);
2886 
2887   slow_locked_verify();
2888 
2889   Metachunk* chunk = NULL;
2890   bool we_did_split_a_chunk = false;
2891 
2892   if (list_index(word_size) != HumongousIndex) {
2893 
2894     ChunkList* free_list = find_free_chunks_list(word_size);
2895     assert(free_list != NULL, "Sanity check");
2896 
2897     chunk = free_list->head();
2898 
2899     if (chunk == NULL) {
2900       // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
2901       // This is the counterpart of the coalescing-upon-chunk-return.
2902 
2903       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2904 
2905       // Is there a larger chunk we could split?
2906       Metachunk* larger_chunk = NULL;
2907       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2908       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2909         larger_chunk = free_chunks(larger_chunk_index)->head();
2910         if (larger_chunk == NULL) {
2911           larger_chunk_index = next_chunk_index(larger_chunk_index);
2912         }
2913       }
2914 
2915       if (larger_chunk != NULL) {
2916         assert(larger_chunk->word_size() > word_size, "Sanity");
2917         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2918 
        // We found a larger chunk. Let's split it up:
2920         // - remove old chunk
2921         // - in its place, create new smaller chunks, with at least one chunk
2922         //   being of target size, the others sized as large as possible. This
2923         //   is to make sure the resulting chunks are "as coalesced as possible"
2924         //   (similar to VirtualSpaceNode::retire()).
2925         // Note: during this operation both ChunkManager and VirtualSpaceNode
2926         //  are temporarily invalid, so be careful with asserts.
2927 
2928         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2929            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2930           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2931           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2932 
2933         chunk = split_chunk(word_size, larger_chunk);
2934 
2935         // This should have worked.
2936         assert(chunk != NULL, "Sanity");
2937         assert(chunk->word_size() == word_size, "Sanity");
2938         assert(chunk->is_tagged_free(), "Sanity");
2939 
2940         we_did_split_a_chunk = true;
2941 
2942       }
2943     }
2944 
2945     if (chunk == NULL) {
2946       return NULL;
2947     }
2948 
2949     // Remove the chunk as the head of the list.
2950     free_list->remove_chunk(chunk);
2951 
2952     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2953                                        p2i(free_list), free_list->count());
2954 
2955   } else {
2956     chunk = humongous_dictionary()->get_chunk(word_size);
2957 
2958     if (chunk == NULL) {
2959       return NULL;
2960     }
2961 
2962     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2963                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2964   }
2965 
2966   // Chunk has been removed from the chunk manager; update counters.
2967   account_for_removed_chunk(chunk);
2968   do_update_in_use_info_for_chunk(chunk, true);
2969   chunk->container()->inc_container_count();
2970   chunk->inc_use_count();
2971 
2972   // Remove it from the links to this freelist
2973   chunk->set_next(NULL);
2974   chunk->set_prev(NULL);
2975 
2976   // Run some verifications (some more if we did a chunk split)
2977 #ifdef ASSERT
2978   if (VerifyMetaspace) {
2979     locked_verify();
2980     VirtualSpaceNode* const vsn = chunk->container();
2981     vsn->verify();
2982     if (we_did_split_a_chunk) {
2983       vsn->verify_free_chunks_are_ideally_merged();
2984     }
2985   }
2986 #endif
2987 
2988   return chunk;
2989 }
2990 
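// Allocate a chunk of the given word size from the free chunk lists, possibly
// splitting a larger free chunk. Returns NULL if no suitable chunk is
// available; in that case callers fall back to carving a new chunk out of the
// virtual space list (see SpaceManager::get_new_chunk()).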
2991 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2992   assert_lock_strong(MetaspaceExpand_lock);
2993   slow_locked_verify();
2994 
2995   // Take from the beginning of the list
2996   Metachunk* chunk = free_chunks_get(word_size);
2997   if (chunk == NULL) {
2998     return NULL;
2999   }
3000 
3001   assert((word_size <= chunk->word_size()) ||
3002          (list_index(chunk->word_size()) == HumongousIndex),
3003          "Non-humongous variable sized chunk");
3004   LogTarget(Debug, gc, metaspace, freelist) lt;
3005   if (lt.is_enabled()) {
3006     size_t list_count;
3007     if (list_index(word_size) < HumongousIndex) {
3008       ChunkList* list = find_free_chunks_list(word_size);
3009       list_count = list->count();
3010     } else {
3011       list_count = humongous_dictionary()->total_count();
3012     }
3013     LogStream ls(lt);
3014     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3015              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3016     ResourceMark rm;
3017     locked_print_free_chunks(&ls);
3018   }
3019 
3020   return chunk;
3021 }
3022 
3023 void ChunkManager::return_single_chunk(Metachunk* chunk) {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(chunk != NULL, "Expected chunk.");
  DEBUG_ONLY(do_verify_chunk(chunk);)
  const ChunkIndex index = chunk->get_chunk_type();
3028   assert(chunk->container() != NULL, "Container should have been set.");
3029   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3030   index_bounds_check(index);
3031 
3032   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3033   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
  // keeps tree node pointers in the chunk payload area, which mangling would overwrite.
3035   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3036 
3037   if (index != HumongousIndex) {
3038     // Return non-humongous chunk to freelist.
3039     ChunkList* list = free_chunks(index);
3040     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3041     list->return_chunk_at_head(chunk);
3042     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3043         chunk_size_name(index), p2i(chunk));
3044   } else {
3045     // Return humongous chunk to dictionary.
3046     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3047     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3048            "Humongous chunk has wrong alignment.");
3049     _humongous_dictionary.return_chunk(chunk);
3050     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3051         chunk_size_name(index), p2i(chunk), chunk->word_size());
3052   }
3053   chunk->container()->dec_container_count();
3054   do_update_in_use_info_for_chunk(chunk, false);
3055 
3056   // Chunk has been added; update counters.
3057   account_for_added_chunk(chunk);
3058 
  // Attempt to coalesce the returned chunk with its neighboring chunks:
3060   // if this chunk is small or special, attempt to coalesce to a medium chunk.
3061   if (index == SmallIndex || index == SpecializedIndex) {
3062     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
      // This did not work. But if this chunk is special, we may still be able to form a small chunk.
3064       if (index == SpecializedIndex) {
3065         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3066           // give up.
3067         }
3068       }
3069     }
3070   }
3071 
3072 }
3073 
3074 void ChunkManager::return_chunk_list(Metachunk* chunks) {
3075   if (chunks == NULL) {
3076     return;
3077   }
3078   LogTarget(Trace, gc, metaspace, freelist) log;
3079   if (log.is_enabled()) { // tracing
3080     log.print("returning list of chunks...");
3081   }
3082   unsigned num_chunks_returned = 0;
3083   size_t size_chunks_returned = 0;
3084   Metachunk* cur = chunks;
3085   while (cur != NULL) {
3086     // Capture the next link before it is changed
3087     // by the call to return_chunk_at_head();
3088     Metachunk* next = cur->next();
3089     if (log.is_enabled()) { // tracing
3090       num_chunks_returned ++;
3091       size_chunks_returned += cur->word_size();
3092     }
3093     return_single_chunk(cur);
3094     cur = next;
3095   }
3096   if (log.is_enabled()) { // tracing
3097     log.print("returned %u chunks to freelist, total word size " SIZE_FORMAT ".",
3098         num_chunks_returned, size_chunks_returned);
3099   }
3100 }
3101 
3102 void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
3103   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3104   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3105     out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
3106   }
3107 }
3108 
3109 // SpaceManager methods
3110 
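// Given a requested chunk word size, return the smallest fixed chunk size
// (specialized, small or medium) that can hold it. Requests larger than a
// medium chunk are returned unchanged and will be treated as humongous chunks.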
3111 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3112   size_t chunk_sizes[] = {
3113       specialized_chunk_size(is_class_space),
3114       small_chunk_size(is_class_space),
3115       medium_chunk_size(is_class_space)
3116   };
3117 
3118   // Adjust up to one of the fixed chunk sizes ...
3119   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3120     if (requested <= chunk_sizes[i]) {
3121       return chunk_sizes[i];
3122     }
3123   }
3124 
3125   // ... or return the size as a humongous chunk.
3126   return requested;
3127 }
3128 
3129 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3130   return adjust_initial_chunk_size(requested, is_class());
3131 }
3132 
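// Returns the word size of the initial chunk handed to a new SpaceManager,
// depending on the metaspace type (boot, anonymous, reflection or other) and
// on whether this manager serves the class space or the non-class space.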
3133 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3134   size_t requested;
3135 
3136   if (is_class()) {
3137     switch (type) {
3138     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3139     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3140     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3141     default:                                 requested = ClassSmallChunk; break;
3142     }
3143   } else {
3144     switch (type) {
3145     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3146     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3147     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3148     default:                                 requested = SmallChunk; break;
3149     }
3150   }
3151 
3152   // Adjust to one of the fixed chunk sizes (unless humongous)
3153   const size_t adjusted = adjust_initial_chunk_size(requested);
3154 
3155   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3156          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3157 
3158   return adjusted;
3159 }
3160 
3161 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3162 
3163   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3164     st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
3165         num_chunks_by_type(i), chunk_size_name(i));
3166   }
3167 
3168   chunk_manager()->locked_print_free_chunks(st);
3169 }
3170 
3171 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3172 
3173   // Decide between a small chunk and a medium chunk.  Up to
3174   // _small_chunk_limit small chunks can be allocated.
3175   // After that a medium chunk is preferred.
3176   size_t chunk_word_size;
3177 
3178   // Special case for anonymous metadata space.
  // Anonymous metadata space is usually small, with the majority within the 1K - 2K range
  // and rarely around 4K (64-bit JVM).
  // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
  // allocations at SpecializedChunk size up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
  // reduces space waste from 60+% to around 30%.
3184   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3185       _mdtype == Metaspace::NonClassType &&
3186       num_chunks_by_type(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3187       word_size + Metachunk::overhead() <= SpecializedChunk) {
3188     return SpecializedChunk;
3189   }
3190 
3191   if (num_chunks_by_type(MediumIndex) == 0 &&
3192       num_chunks_by_type(SmallIndex) < _small_chunk_limit) {
3193     chunk_word_size = (size_t) small_chunk_size();
3194     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3195       chunk_word_size = medium_chunk_size();
3196     }
3197   } else {
3198     chunk_word_size = medium_chunk_size();
3199   }
3200 
  // Might still need a humongous chunk.  Enforce
  // humongous allocation sizes to be aligned up to
  // the smallest chunk size.
3204   size_t if_humongous_sized_chunk =
3205     align_up(word_size + Metachunk::overhead(),
3206                   smallest_chunk_size());
3207   chunk_word_size =
3208     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3209 
3210   assert(!SpaceManager::is_humongous(word_size) ||
3211          chunk_word_size == if_humongous_sized_chunk,
3212          "Size calculation is wrong, word_size " SIZE_FORMAT
3213          " chunk_word_size " SIZE_FORMAT,
3214          word_size, chunk_word_size);
3215   Log(gc, metaspace, alloc) log;
3216   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3217     log.debug("Metadata humongous allocation:");
3218     log.debug("  word_size " PTR_FORMAT, word_size);
3219     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3220     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3221   }
3222   return chunk_word_size;
3223 }
3224 
3225 void SpaceManager::track_metaspace_memory_usage() {
3226   if (is_init_completed()) {
3227     if (is_class()) {
3228       MemoryService::track_compressed_class_memory_usage();
3229     }
3230     MemoryService::track_metaspace_memory_usage();
3231   }
3232 }
3233 
3234 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3235   assert_lock_strong(_lock);
3236   assert(vs_list()->current_virtual_space() != NULL,
3237          "Should have been set");
3238   assert(current_chunk() == NULL ||
3239          current_chunk()->allocate(word_size) == NULL,
3240          "Don't need to expand");
3241   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3242 
3243   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3244     size_t words_left = 0;
3245     size_t words_used = 0;
3246     if (current_chunk() != NULL) {
3247       words_left = current_chunk()->free_word_size();
3248       words_used = current_chunk()->used_word_size();
3249     }
3250     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3251                                        word_size, words_used, words_left);
3252   }
3253 
3254   // Get another chunk
3255   size_t chunk_word_size = calc_chunk_size(word_size);
3256   Metachunk* next = get_new_chunk(chunk_word_size);
3257 
3258   MetaWord* mem = NULL;
3259 
3260   // If a chunk was available, add it to the in-use chunk list
3261   // and do an allocation from it.
3262   if (next != NULL) {
3263     // Add to this manager's list of chunks in use.
3264     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3265     // case it usually makes no sense to make it the current chunk, since the next allocation would
3266     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3267     // good chunk which could be used for more normal allocations.
3268     bool make_current = true;
3269     if (next->get_chunk_type() == HumongousIndex &&
3270         current_chunk() != NULL) {
3271       make_current = false;
3272     }
3273     add_chunk(next, make_current);
3274     mem = next->allocate(word_size);
3275   }
3276 
3277   // Track metaspace memory usage statistic.
3278   track_metaspace_memory_usage();
3279 
3280   return mem;
3281 }
3282 
3283 void SpaceManager::print_on(outputStream* st) const {
3284   SpaceManagerStatistics stat;
3285   add_to_statistics(&stat); // will lock _lock.
3286   stat.print_on(st, 1*K, false);
3287 }
3288 
3289 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3290                            Metaspace::MetaspaceType space_type,
3291                            Mutex* lock) :
3292   _mdtype(mdtype),
3293   _space_type(space_type),
3294   _capacity_words(0),
3295   _used_words(0),
3296   _overhead_words(0),
3297   _block_freelists(NULL),
3298   _lock(lock),
3299   _chunk_list(NULL),
3300   _current_chunk(NULL)
3301 {
3302   Metadebug::init_allocation_fail_alot_count();
3303   memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
3304   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3305 }
3306 
3307 void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {
3308 
3309   assert_lock_strong(MetaspaceExpand_lock);
3310 
3311   _capacity_words += new_chunk->word_size();
3312   _overhead_words += Metachunk::overhead();
3313   DEBUG_ONLY(new_chunk->verify());
3314   _num_chunks_by_type[new_chunk->get_chunk_type()] ++;
3315 
3316   // Adjust global counters:
3317   MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
3318   MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
3319 }
3320 
3321 void SpaceManager::account_for_allocation(size_t words) {
  // Note: we should be locked with the ClassLoaderData-specific metaspace lock.
3323   // We may or may not be locked with the global metaspace expansion lock.
3324   assert_lock_strong(lock());
3325 
3326   // Add to the per SpaceManager totals. This can be done non-atomically.
3327   _used_words += words;
3328 
3329   // Adjust global counters. This will be done atomically.
3330   MetaspaceUtils::inc_used(mdtype(), words);
3331 }
3332 
3333 void SpaceManager::account_for_spacemanager_death() {
3334 
3335   assert_lock_strong(MetaspaceExpand_lock);
3336 
3337   MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
3338   MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
3339   MetaspaceUtils::dec_used(mdtype(), _used_words);
3340 }
3341 
3342 SpaceManager::~SpaceManager() {
3343 
  // This locks this->_lock, which can't be done while holding the MetaspaceExpand_lock.
3345   DEBUG_ONLY(verify_metrics());
3346 
3347   MutexLockerEx fcl(MetaspaceExpand_lock,
3348                     Mutex::_no_safepoint_check_flag);
3349 
3350   chunk_manager()->slow_locked_verify();
3351 
3352   account_for_spacemanager_death();
3353 
3354   Log(gc, metaspace, freelist) log;
3355   if (log.is_trace()) {
3356     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3357     ResourceMark rm;
3358     LogStream ls(log.trace());
3359     locked_print_chunks_in_use_on(&ls);
3360     if (block_freelists() != NULL) {
3361       block_freelists()->print_on(&ls);
3362     }
3363   }
3364 
3365   // Add all the chunks in use by this space manager
3366   // to the global list of free chunks.
3367 
3368   // Follow each list of chunks-in-use and add them to the
3369   // free lists.  Each list is NULL terminated.
3370   chunk_manager()->return_chunk_list(chunk_list());
3371 #ifdef ASSERT
3372   _chunk_list = NULL;
3373   _current_chunk = NULL;
3374 #endif
3375 
3376   chunk_manager()->slow_locked_verify();
3377 
3378   if (_block_freelists != NULL) {
3379     delete _block_freelists;
3380   }
3381 }
3382 
3383 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3384   assert_lock_strong(lock());
3385   // Allocations and deallocations are in raw_word_size
3386   size_t raw_word_size = get_allocation_word_size(word_size);
3387   // Lazily create a block_freelist
3388   if (block_freelists() == NULL) {
3389     _block_freelists = new BlockFreelist();
3390   }
3391   block_freelists()->return_block(p, raw_word_size);
3392   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
3393 }
3394 
3395 // Adds a chunk to the list of chunks in use.
3396 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3397 
3398   assert_lock_strong(_lock);
3399   assert(new_chunk != NULL, "Should not be NULL");
3400   assert(new_chunk->next() == NULL, "Should not be on a list");
3401 
3402   new_chunk->reset_empty();
3403 
  // Find the correct list and set the current
  // chunk for that list.
3406   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3407 
3408   if (make_current) {
3409     // If we are to make the chunk current, retire the old current chunk and replace
3410     // it with the new chunk.
3411     retire_current_chunk();
3412     set_current_chunk(new_chunk);
3413   }
3414 
3415   // Add the new chunk at the head of its respective chunk list.
3416   new_chunk->set_next(_chunk_list);
3417   _chunk_list = new_chunk;
3418 
3419   // Adjust counters.
3420   account_for_new_chunk(new_chunk);
3421 
3422   assert(new_chunk->is_empty(), "Not ready for reuse");
3423   Log(gc, metaspace, freelist) log;
3424   if (log.is_trace()) {
3425     log.trace("SpaceManager::added chunk: ");
3426     ResourceMark rm;
3427     LogStream ls(log.trace());
3428     new_chunk->print_on(&ls);
3429     chunk_manager()->locked_print_free_chunks(&ls);
3430   }
3431 }
3432 
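// Retire the current chunk: if its remaining free space is large enough to be
// useful, allocate that space and immediately return it to the block freelist,
// so it can serve later small allocations instead of being wasted.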
3433 void SpaceManager::retire_current_chunk() {
3434   if (current_chunk() != NULL) {
3435     size_t remaining_words = current_chunk()->free_word_size();
3436     if (remaining_words >= SmallBlocks::small_block_min_size()) {
3437       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3438       deallocate(ptr, remaining_words);
3439       account_for_allocation(remaining_words);
3440     }
3441   }
3442 }
3443 
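// Acquire a new chunk of the given word size: first try the global chunk
// freelist (ChunkManager); if that yields nothing, request a new chunk from
// the virtual space list.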
3444 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3445   // Get a chunk from the chunk freelist
3446   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3447 
3448   if (next == NULL) {
3449     next = vs_list()->get_new_chunk(chunk_word_size,
3450                                     medium_chunk_bunch());
3451   }
3452 
3453   Log(gc, metaspace, alloc) log;
3454   if (log.is_debug() && next != NULL &&
3455       SpaceManager::is_humongous(next->word_size())) {
3456     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3457   }
3458 
3459   return next;
3460 }
3461 
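// Entry point for allocations from this SpaceManager. First tries the
// freelist of previously deallocated blocks (once it has grown beyond
// allocation_from_dictionary_limit), then falls back to allocating from the
// current chunk via allocate_work().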
3462 MetaWord* SpaceManager::allocate(size_t word_size) {
3463   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3464   size_t raw_word_size = get_allocation_word_size(word_size);
3465   BlockFreelist* fl =  block_freelists();
3466   MetaWord* p = NULL;
3467 
3468   DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());
3469 
3470   // Allocation from the dictionary is expensive in the sense that
3471   // the dictionary has to be searched for a size.  Don't allocate
3472   // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
3474   // for allocations.  Do some profiling.  JJJ
3475   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3476     p = fl->get_block(raw_word_size);
3477     if (p != NULL) {
3478       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
3479     }
3480   }
3481   if (p == NULL) {
3482     p = allocate_work(raw_word_size);
3483   }
3484 
3485   return p;
3486 }
3487 
// Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks).
3490 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3491   assert_lock_strong(lock());
3492 #ifdef ASSERT
3493   if (Metadebug::test_metadata_failure()) {
3494     return NULL;
3495   }
3496 #endif
3497   // Is there space in the current chunk?
3498   MetaWord* result = NULL;
3499 
3500   if (current_chunk() != NULL) {
3501     result = current_chunk()->allocate(word_size);
3502   }
3503 
3504   if (result == NULL) {
3505     result = grow_and_allocate(word_size);
3506   }
3507 
3508   if (result != NULL) {
3509     account_for_allocation(word_size);
3510   }
3511 
3512   return result;
3513 }
3514 
3515 void SpaceManager::verify() {
3516   Metachunk* curr = chunk_list();
3517   while (curr != NULL) {
3518     DEBUG_ONLY(do_verify_chunk(curr);)
3519     assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3520     curr = curr->next();
3521   }
3522 }
3523 
3524 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3525   assert(is_humongous(chunk->word_size()) ||
3526          chunk->word_size() == medium_chunk_size() ||
3527          chunk->word_size() == small_chunk_size() ||
3528          chunk->word_size() == specialized_chunk_size(),
3529          "Chunk size is wrong");
3530   return;
3531 }
3532 
3533 void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
3534   assert_lock_strong(lock());
3535   Metachunk* chunk = chunk_list();
3536   while (chunk != NULL) {
3537     UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type());
3538     chunk_stat.add_num(1);
3539     chunk_stat.add_cap(chunk->word_size());
3540     chunk_stat.add_overhead(Metachunk::overhead());
3541     chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
3542     if (chunk != current_chunk()) {
3543       chunk_stat.add_waste(chunk->free_word_size());
3544     } else {
3545       chunk_stat.add_free(chunk->free_word_size());
3546     }
3547     chunk = chunk->next();
3548   }
3549   if (block_freelists() != NULL) {
3550     out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
3551   }
3552 }
3553 
3554 void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
3555   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3556   add_to_statistics_locked(out);
3557 }
3558 
3559 #ifdef ASSERT
3560 void SpaceManager::verify_metrics_locked() const {
3561   assert_lock_strong(lock());
3562 
3563   SpaceManagerStatistics stat;
3564   add_to_statistics_locked(&stat);
3565 
3566   UsedChunksStatistics chunk_stats = stat.totals();
3567 
3568   DEBUG_ONLY(chunk_stats.check_sanity());
3569 
3570   assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
3571   assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
3572   assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
3573 }
3574 
3575 void SpaceManager::verify_metrics() const {
3576   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3577   verify_metrics_locked();
3578 }
3579 #endif // ASSERT
3580 
3581 
3582 
3583 // MetaspaceUtils
size_t MetaspaceUtils::_capacity_words [Metaspace::MetadataTypeCount] = {0, 0};
size_t MetaspaceUtils::_overhead_words [Metaspace::MetadataTypeCount] = {0, 0};
volatile size_t MetaspaceUtils::_used_words [Metaspace::MetadataTypeCount] = {0, 0};
3587 
3588 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
3589 // output will be the accumulated values for all live metaspaces.
3590 // Note: method does not do any locking.
3591 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
3592   out->reset();
3593   ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      msp->add_to_statistics(out);
    }
  }
3600 }
3601 
3602 size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
3603   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3604   return list == NULL ? 0 : list->free_bytes();
3605 }
3606 
3607 size_t MetaspaceUtils::free_in_vs_bytes() {
3608   return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
3609 }
3610 
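// Helpers for the global usage counters: capacity and overhead are only ever
// changed under the MetaspaceExpand_lock, so plain updates suffice; the used
// counter is changed under per-loader metaspace locks and is therefore
// updated atomically.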
3611 static void inc_stat_nonatomically(size_t* pstat, size_t words) {
3612   assert_lock_strong(MetaspaceExpand_lock);
3613   (*pstat) += words;
3614 }
3615 
3616 static void dec_stat_nonatomically(size_t* pstat, size_t words) {
3617   assert_lock_strong(MetaspaceExpand_lock);
3618   const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
3621          size_now, words);
3622   *pstat = size_now - words;
3623 }
3624 
3625 static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
3626   Atomic::add(words, pstat);
3627 }
3628 
3629 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
3630   const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
3633          size_now, words);
3634   Atomic::sub(words, pstat);
3635 }
3636 
3637 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3638   dec_stat_nonatomically(&_capacity_words[mdtype], words);
3639 }
3640 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3641   inc_stat_nonatomically(&_capacity_words[mdtype], words);
3642 }
3643 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3644   dec_stat_atomically(&_used_words[mdtype], words);
3645 }
3646 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3647   inc_stat_atomically(&_used_words[mdtype], words);
3648 }
3649 void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
3650   dec_stat_nonatomically(&_overhead_words[mdtype], words);
3651 }
3652 void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
3653   inc_stat_nonatomically(&_overhead_words[mdtype], words);
3654 }
3655 
3656 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3657   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3658   return list == NULL ? 0 : list->reserved_bytes();
3659 }
3660 
3661 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3662   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3663   return list == NULL ? 0 : list->committed_bytes();
3664 }
3665 
3666 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3667 
3668 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3669   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3670   if (chunk_manager == NULL) {
3671     return 0;
3672   }
3673   chunk_manager->slow_verify();
3674   return chunk_manager->free_chunks_total_words();
3675 }
3676 
3677 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3678   return free_chunks_total_words(mdtype) * BytesPerWord;
3679 }
3680 
3681 size_t MetaspaceUtils::free_chunks_total_words() {
3682   return free_chunks_total_words(Metaspace::ClassType) +
3683          free_chunks_total_words(Metaspace::NonClassType);
3684 }
3685 
3686 size_t MetaspaceUtils::free_chunks_total_bytes() {
3687   return free_chunks_total_words() * BytesPerWord;
3688 }
3689 
3690 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3691   return Metaspace::get_chunk_manager(mdtype) != NULL;
3692 }
3693 
3694 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3695   if (!has_chunk_free_list(mdtype)) {
3696     return MetaspaceChunkFreeListSummary();
3697   }
3698 
3699   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3700   return cm->chunk_free_list_summary();
3701 }
3702 
3703 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3704   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3705                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3706 }
3707 
3708 void MetaspaceUtils::print_on(outputStream* out) {
3709   Metaspace::MetadataType nct = Metaspace::NonClassType;
3710 
3711   out->print_cr(" Metaspace       "
3712                 "used "      SIZE_FORMAT "K, "
3713                 "capacity "  SIZE_FORMAT "K, "
3714                 "committed " SIZE_FORMAT "K, "
3715                 "reserved "  SIZE_FORMAT "K",
3716                 used_bytes()/K,
3717                 capacity_bytes()/K,
3718                 committed_bytes()/K,
3719                 reserved_bytes()/K);
3720 
3721   if (Metaspace::using_class_space()) {
3722     Metaspace::MetadataType ct = Metaspace::ClassType;
3723     out->print_cr("  class space    "
3724                   "used "      SIZE_FORMAT "K, "
3725                   "capacity "  SIZE_FORMAT "K, "
3726                   "committed " SIZE_FORMAT "K, "
3727                   "reserved "  SIZE_FORMAT "K",
3728                   used_bytes(ct)/K,
3729                   capacity_bytes(ct)/K,
3730                   committed_bytes(ct)/K,
3731                   reserved_bytes(ct)/K);
3732   }
3733 }
3734 
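// Closure used by print_report(): walks the ClassLoaderDataGraph, accumulates
// metaspace statistics for all loaders (in total and per space type) and
// optionally prints one line per class loader.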
3735 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
3736 private:
3737   outputStream* const _out;
3738   const size_t        _scale;
3739   const bool          _do_print;
3740   const bool          _break_down_by_chunktype;
3741 
3742 public:
3743 
3744   uintx                           _num_loaders;
3745   ClassLoaderMetaspaceStatistics  _stats_total;
3746 
3747   uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
3748   ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];
3749 
3750 public:
3751   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
3752     : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
3753     , _num_loaders(0)
3754   {
3755     memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
3756   }
3757 
3758   void do_cld(ClassLoaderData* cld) {
3759 
3760     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3761 
3762     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
3763     if (msp == NULL) {
3764       return;
3765     }
3766 
3767     // Collect statistics for this class loader metaspace
3768     ClassLoaderMetaspaceStatistics this_cld_stat;
3769     msp->add_to_statistics(&this_cld_stat);
3770 
3771     // And add it to the running totals
3772     _stats_total.add(this_cld_stat);
3773     _num_loaders ++;
3774     _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
3775     _num_loaders_by_spacetype[msp->space_type()] ++;
3776 
3777     // Optionally, print.
3778     if (_do_print) {
3779 
3780       _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders);
3781 
3782       if (cld->is_anonymous()) {
3783         _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));
3784       } else {
3785         ResourceMark rm;
3786         _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
3787       }
3788 
3789       if (cld->is_unloading()) {
3790         _out->print(" (unloading)");
3791       }
3792 
3793       this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
3794       _out->cr();
3795 
3796     }
3797 
3798   } // do_cld
3799 
3800 };
3801 
3802 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
3803   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3804   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3805   {
3806     if (Metaspace::using_class_space()) {
3807       out->print("  Non-class space:  ");
3808     }
3809     print_scaled_words(out, reserved_nonclass_words, scale, 7);
3810     out->print(" reserved, ");
3811     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
3812     out->print_cr(" committed ");
3813 
3814     if (Metaspace::using_class_space()) {
3815       const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3816       const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3817       out->print("      Class space:  ");
3818       print_scaled_words(out, reserved_class_words, scale, 7);
3819       out->print(" reserved, ");
3820       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
3821       out->print_cr(" committed ");
3822 
3823       const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
3824       const size_t committed_words = committed_nonclass_words + committed_class_words;
3825       out->print("             Both:  ");
3826       print_scaled_words(out, reserved_words, scale, 7);
3827       out->print(" reserved, ");
3828       print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
3829       out->print_cr(" committed ");
3830     }
3831   }
3832 }
3833 
3834 // This will print out a basic metaspace usage report but
3835 // unlike print_report() is guaranteed not to lock or to walk the CLDG.
3836 void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
3837 
3838   out->cr();
3839   out->print_cr("Usage:");
3840 
3841   if (Metaspace::using_class_space()) {
3842     out->print("  Non-class:  ");
3843   }
3844 
3845   // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
3846   // MetaspaceUtils.
3847   const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
3848   const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
3849   const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
3850   const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
3851 
3852   print_scaled_words(out, cap_nc, scale, 5);
3853   out->print(" capacity, ");
3854   print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
3855   out->print(" used, ");
3856   print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
3857   out->print(" free+waste, ");
3858   print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
3859   out->print(" overhead. ");
3860   out->cr();
3861 
3862   if (Metaspace::using_class_space()) {
3863     const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
3864     const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
3865     const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
3866     const size_t free_and_waste_c = cap_c - overhead_c - used_c;
3867     out->print("      Class:  ");
3868     print_scaled_words(out, cap_c, scale, 5);
3869     out->print(" capacity, ");
3870     print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
3871     out->print(" used, ");
3872     print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
3873     out->print(" free+waste, ");
3874     print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
3875     out->print(" overhead. ");
3876     out->cr();
3877 
3878     out->print("       Both:  ");
3879     const size_t cap = cap_nc + cap_c;
3880 
3881     print_scaled_words(out, cap, scale, 5);
3882     out->print(" capacity, ");
3883     print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
3884     out->print(" used, ");
3885     print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
3886     out->print(" free+waste, ");
3887     print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
3888     out->print(" overhead. ");
3889     out->cr();
3890   }
3891 
3892   out->cr();
3893   out->print_cr("Virtual space:");
3894 
3895   print_vs(out, scale);
3896 
3897   out->cr();
3898   out->print_cr("Chunk freelists:");
3899 
3900   if (Metaspace::using_class_space()) {
3901     out->print("   Non-Class:  ");
3902   }
3903   print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3904   out->cr();
3905   if (Metaspace::using_class_space()) {
3906     out->print("       Class:  ");
3907     print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
3908     out->cr();
3909     out->print("        Both:  ");
3910     print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words() +
3911                               Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3912     out->cr();
3913   }
3914   out->cr();
3915 
3916 }
3917 
3918 void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
3919 
3920   const bool print_loaders = (flags & rf_show_loaders) > 0;
3921   const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
3922   const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
3923 
3924   // Some report options require walking the class loader data graph.
3925   PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_by_chunktype);
3926   if (print_loaders) {
3927     out->cr();
3928     out->print_cr("Usage per loader:");
3929     out->cr();
3930   }
3931 
3932   ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print
3933 
3934   // Print totals, broken up by space type.
3935   if (print_by_spacetype) {
3936     out->cr();
3937     out->print_cr("Usage per space type:");
3938     out->cr();
3939     for (int space_type = (int)Metaspace::ZeroMetaspaceType;
3940          space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
3941     {
3942       uintx num = cl._num_loaders_by_spacetype[space_type];
3943       out->print("%s (" UINTX_FORMAT " loader%s)%c",
3944         space_type_name((Metaspace::MetaspaceType)space_type),
3945         num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
3946       if (num > 0) {
3947         cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
3948       }
3949       out->cr();
3950     }
3951   }
3952 
3953   // Print totals for in-use data:
3954   out->cr();
3955   out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
3956       cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));
3957 
3958   cl._stats_total.print_on(out, scale, print_by_chunktype);
3959 
3960   // -- Print Virtual space.
3961   out->cr();
3962   out->print_cr("Virtual space:");
3963 
3964   print_vs(out, scale);
3965 
3966   // -- Print VirtualSpaceList details.
3967   if ((flags & rf_show_vslist) > 0) {
3968     out->cr();
3969     out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
3970 
3971     if (Metaspace::using_class_space()) {
3972       out->print_cr("   Non-Class:");
3973     }
3974     Metaspace::space_list()->print_on(out, scale);
3975     if (Metaspace::using_class_space()) {
3976       out->print_cr("       Class:");
3977       Metaspace::class_space_list()->print_on(out, scale);
3978     }
3979   }
3980   out->cr();
3981 
3982   // -- Print VirtualSpaceList map.
3983   if ((flags & rf_show_vsmap) > 0) {
3984     out->cr();
3985     out->print_cr("Virtual space map:");
3986 
3987     if (Metaspace::using_class_space()) {
3988       out->print_cr("   Non-Class:");
3989     }
3990     Metaspace::space_list()->print_map(out);
3991     if (Metaspace::using_class_space()) {
3992       out->print_cr("       Class:");
3993       Metaspace::class_space_list()->print_map(out);
3994     }
3995   }
3996   out->cr();
3997 
3998   // -- Print Freelists (ChunkManager) details
3999   out->cr();
4000   out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
4001 
4002   ChunkManagerStatistics non_class_cm_stat;
4003   Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
4004 
4005   if (Metaspace::using_class_space()) {
4006     out->print_cr("   Non-Class:");
4007   }
4008   non_class_cm_stat.print_on(out, scale);
4009 
4010   if (Metaspace::using_class_space()) {
4011     ChunkManagerStatistics class_cm_stat;
4012     Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
4013     out->print_cr("       Class:");
4014     class_cm_stat.print_on(out, scale);
4015   }
4016 
4017   // As a convenience, print a summary of common waste.
4018   out->cr();
4019   out->print("Waste ");
  // All waste figures below are printed as percentages of the total size of memory committed for metaspace.
4021   const size_t committed_words = committed_bytes() / BytesPerWord;
4022 
4023   out->print("(percentages refer to total committed size ");
4024   print_scaled_words(out, committed_words, scale);
4025   out->print_cr("):");
4026 
4027   // Print space committed but not yet used by any class loader
4028   const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
4029   out->print("              Committed unused: ");
4030   print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
4031   out->cr();
4032 
4033   // Print waste for in-use chunks.
4034   UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
4035   UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
4036   UsedChunksStatistics ucs_all;
4037   ucs_all.add(ucs_nonclass);
4038   ucs_all.add(ucs_class);
4039 
4040   out->print("        Waste in chunks in use: ");
4041   print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
4042   out->cr();
4043   out->print("         Free in chunks in use: ");
4044   print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
4045   out->cr();
4046   out->print("     Overhead in chunks in use: ");
4047   print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
4048   out->cr();
4049 
4050   // Print waste in free chunks.
4051   const size_t total_capacity_in_free_chunks =
4052       Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
4053      (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
4054   out->print("                In free chunks: ");
4055   print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
4056   out->cr();
4057 
4058   // Print waste in deallocated blocks.
4059   const uintx free_blocks_num =
4060       cl._stats_total.nonclass_sm_stats().free_blocks_num() +
4061       cl._stats_total.class_sm_stats().free_blocks_num();
4062   const size_t free_blocks_cap_words =
4063       cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
4064       cl._stats_total.class_sm_stats().free_blocks_cap_words();
4065   out->print("Deallocated from chunks in use: ");
4066   print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
4067   out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
4068   out->cr();
4069 
4070   // Print total waste.
4071   const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
4072       + free_blocks_cap_words + unused_words_in_vs;
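  // Illustrative example with made-up numbers: 1M waste in used chunks + 3M free
  // in used chunks + 0.5M chunk overhead + 10M in free chunks + 0.5M in
  // deallocated blocks + 5M committed-but-unused would be reported as 20M total.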
4073   out->print("                       -total-: ");
4074   print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
4075   out->cr();
4076 
4077   // Print internal statistics
4078 #ifdef ASSERT
4079   out->cr();
4080   out->cr();
4081   out->print_cr("Internal statistics:");
4082   out->cr();
4083   out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
4084   out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
4085   out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
4086   out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
4087   out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
4088   out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
4089   out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
4090   out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
4091   out->cr();
4092 #endif
4093 
4094   // Print some interesting settings
4095   out->cr();
4096   out->cr();
4097   out->print("MaxMetaspaceSize: ");
4098   print_human_readable_size(out, MaxMetaspaceSize, scale);
4099   out->cr();
4100   out->print("InitialBootClassLoaderMetaspaceSize: ");
4101   print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
4102   out->cr();
4103 
4104   out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
4105   out->cr();
4106   if (Metaspace::using_class_space()) {
4107     out->print("CompressedClassSpaceSize: ");
4108     print_human_readable_size(out, CompressedClassSpaceSize, scale);
4109   }
4110 
4111   out->cr();
4112   out->cr();
4113 
4114 } // MetaspaceUtils::print_report()
4115 
4116 // Prints an ASCII representation of the given space.
4117 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4118   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  const bool for_class = (mdtype == Metaspace::ClassType);
4120   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4121   if (vsl != NULL) {
4122     if (for_class) {
4123       if (!Metaspace::using_class_space()) {
4124         out->print_cr("No Class Space.");
4125         return;
4126       }
4127       out->print_raw("---- Metaspace Map (Class Space) ----");
4128     } else {
4129       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4130     }
4131     // Print legend:
4132     out->cr();
4133     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4134     out->cr();
    vsl->print_map(out);
4137     out->cr();
4138   }
4139 }
4140 
4141 void MetaspaceUtils::verify_free_chunks() {
4142   Metaspace::chunk_manager_metadata()->verify();
4143   if (Metaspace::using_class_space()) {
4144     Metaspace::chunk_manager_class()->verify();
4145   }
4146 }
4147 
4148 void MetaspaceUtils::verify_metrics() {
4149 #ifdef ASSERT
4150   // Please note: there are time windows where the internal counters are out of sync with
4151   // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
4152   // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
4153   // not be counted when iterating the CLDG. So be careful when you call this method.
4154   ClassLoaderMetaspaceStatistics total_stat;
4155   collect_statistics(&total_stat);
4156   UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
4157   UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
4158 
4159   bool mismatch = false;
4160   for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
4161     Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
4162     UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
4163     if (capacity_words(mdtype) != chunk_stat.cap() ||
4164         used_words(mdtype) != chunk_stat.used() ||
4165         overhead_words(mdtype) != chunk_stat.overhead()) {
4166       mismatch = true;
4167       tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
4168       tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4169                     capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
4170       tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4171                     chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
4172       tty->flush();
4173     }
4174   }
4175   assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
4176 #endif
4177 }
4178 
4179 
4180 // Metaspace methods
4181 
4182 size_t Metaspace::_first_chunk_word_size = 0;
4183 size_t Metaspace::_first_class_chunk_word_size = 0;
4184 
4185 size_t Metaspace::_commit_alignment = 0;
4186 size_t Metaspace::_reserve_alignment = 0;
4187 
4188 VirtualSpaceList* Metaspace::_space_list = NULL;
4189 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4190 
4191 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4192 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4193 
4194 #define VIRTUALSPACEMULTIPLIER 2
4195 
4196 #ifdef _LP64
4197 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4198 
4199 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4200   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4201   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4202   // narrow_klass_base is the lower of the metaspace base and the cds base
4203   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4204   // between the lower base and higher address.
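  // Illustrative example (addresses and sizes are assumptions, not defaults):
  // without a CDS archive, a metaspace_base of 2G and a 1G compressed class
  // space give a higher_address of 3G. Since 3G is below klass_encoding_max
  // (UnscaledClassSpaceMax << LogKlassAlignmentInBytes, i.e. 32G with 8-byte
  // klass alignment), lower_base can be set to zero, and since the resulting
  // 3G span also fits into UnscaledClassSpaceMax (4G), the narrow_klass_shift
  // can stay 0.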
4205   address lower_base;
4206   address higher_address;
4207 #if INCLUDE_CDS
4208   if (UseSharedSpaces) {
4209     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4210                           (address)(metaspace_base + compressed_class_space_size()));
4211     lower_base = MIN2(metaspace_base, cds_base);
4212   } else
4213 #endif
4214   {
4215     higher_address = metaspace_base + compressed_class_space_size();
4216     lower_base = metaspace_base;
4217 
4218     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4219     // If compressed class space fits in lower 32G, we don't need a base.
4220     if (higher_address <= (address)klass_encoding_max) {
4221       lower_base = 0; // Effectively lower base is zero.
4222     }
4223   }
4224 
4225   Universe::set_narrow_klass_base(lower_base);
4226 
  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how the dump-time narrow_klass_shift is set. Although CDS could also
  // work in zero-shift mode, it uses LogKlassAlignmentInBytes for the klass
  // shift to stay consistent with AOT, so archived Java heap objects can be
  // used at the same time as AOT code.
4233   if (!UseSharedSpaces
4234       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4235     Universe::set_narrow_klass_shift(0);
4236   } else {
4237     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4238   }
4239   AOTLoader::set_narrow_klass_shift();
4240 }
4241 
4242 #if INCLUDE_CDS
4243 // Return TRUE if the specified metaspace_base and cds_base are close enough
4244 // to work with compressed klass pointers.
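// For instance (illustrative addresses, assuming the archive's core spaces end
// below 4G): a cds_base at 2G and a metaspace_base at 3G with a 1G compressed
// class space span the range [2G, 4G), i.e. 2G in total, which fits into
// UnscaledClassSpaceMax (4G), so this function would return true.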
4245 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4246   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4247   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4248   address lower_base = MIN2((address)metaspace_base, cds_base);
4249   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4250                                 (address)(metaspace_base + compressed_class_space_size()));
4251   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4252 }
4253 #endif
4254 
4255 // Try to allocate the metaspace at the requested addr.
4256 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4257   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4258   assert(using_class_space(), "called improperly");
4259   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4260   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4261          "Metaspace size is too big");
4262   assert_is_aligned(requested_addr, _reserve_alignment);
4263   assert_is_aligned(cds_base, _reserve_alignment);
4264   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4265 
4266   // Don't use large pages for the class space.
4267   bool large_pages = false;
4268 
4269 #if !(defined(AARCH64) || defined(AIX))
4270   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4271                                              _reserve_alignment,
4272                                              large_pages,
4273                                              requested_addr);
#else // AARCH64 || AIX
4275   ReservedSpace metaspace_rs;
4276 
4277   // Our compressed klass pointers may fit nicely into the lower 32
4278   // bits.
4279   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4280     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4281                                  _reserve_alignment,
4282                                  large_pages,
4283                                  requested_addr);
4284   }
4285 
4286   if (! metaspace_rs.is_reserved()) {
4287     // Aarch64: Try to align metaspace so that we can decode a compressed
4288     // klass with a single MOVK instruction.  We can do this iff the
4289     // compressed class base is a multiple of 4G.
4290     // Aix: Search for a place where we can find memory. If we need to load
4291     // the base, 4G alignment is helpful, too.
4292     size_t increment = AARCH64_ONLY(4*)G;
4293     for (char *a = align_up(requested_addr, increment);
4294          a < (char*)(1024*G);
4295          a += increment) {
4296       if (a == (char *)(32*G)) {
4297         // Go faster from here on. Zero-based is no longer possible.
4298         increment = 4*G;
4299       }
4300 
4301 #if INCLUDE_CDS
4302       if (UseSharedSpaces
4303           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4304         // We failed to find an aligned base that will reach.  Fall
4305         // back to using our requested addr.
4306         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4307                                      _reserve_alignment,
4308                                      large_pages,
4309                                      requested_addr);
4310         break;
4311       }
4312 #endif
4313 
4314       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4315                                    _reserve_alignment,
4316                                    large_pages,
4317                                    a);
4318       if (metaspace_rs.is_reserved())
4319         break;
4320     }
4321   }
4322 
#endif // AARCH64 || AIX
4324 
4325   if (!metaspace_rs.is_reserved()) {
4326 #if INCLUDE_CDS
4327     if (UseSharedSpaces) {
4328       size_t increment = align_up(1*G, _reserve_alignment);
4329 
4330       // Keep trying to allocate the metaspace, increasing the requested_addr
4331       // by 1GB each time, until we reach an address that will no longer allow
4332       // use of CDS with compressed klass pointers.
4333       char *addr = requested_addr;
4334       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4335              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4336         addr = addr + increment;
4337         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4338                                      _reserve_alignment, large_pages, addr);
4339       }
4340     }
4341 #endif
    // If there has been no successful allocation so far, try to allocate the
    // space anywhere. If that also fails, exit with an OOM error. At this point
    // we cannot fall back to allocating the metaspace as if
    // UseCompressedClassPointers were off, because too much initialization that
    // depends on UseCompressedClassPointers has already happened; the flag can
    // no longer be turned off.
4347     if (!metaspace_rs.is_reserved()) {
4348       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4349                                    _reserve_alignment, large_pages);
4350       if (!metaspace_rs.is_reserved()) {
4351         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4352                                               compressed_class_space_size()));
4353       }
4354     }
4355   }
4356 
4357   // If we got here then the metaspace got allocated.
4358   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4359 
4360 #if INCLUDE_CDS
4361   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4362   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4363     FileMapInfo::stop_sharing_and_unmap(
4364         "Could not allocate metaspace at a compatible address");
4365   }
4366 #endif
4367   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4368                                   UseSharedSpaces ? (address)cds_base : 0);
4369 
4370   initialize_class_space(metaspace_rs);
4371 
4372   LogTarget(Trace, gc, metaspace) lt;
4373   if (lt.is_enabled()) {
4374     ResourceMark rm;
4375     LogStream ls(lt);
4376     print_compressed_class_space(&ls, requested_addr);
4377   }
4378 }
4379 
4380 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4381   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4382                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4383   if (_class_space_list != NULL) {
4384     address base = (address)_class_space_list->current_virtual_space()->bottom();
4385     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4386                  compressed_class_space_size(), p2i(base));
4387     if (requested_addr != 0) {
4388       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4389     }
4390     st->cr();
4391   }
4392 }
4393 
4394 // For UseCompressedClassPointers the class space is reserved above the top of
4395 // the Java heap.  The argument passed in is at the base of the compressed space.
4396 void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
4398   assert(rs.size() >= CompressedClassSpaceSize,
4399          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4400   assert(using_class_space(), "Must be using class space");
4401   _class_space_list = new VirtualSpaceList(rs);
4402   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4403 
4404   if (!_class_space_list->initialization_succeeded()) {
4405     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4406   }
4407 }
4408 
#endif // _LP64
4410 
4411 void Metaspace::ergo_initialize() {
4412   if (DumpSharedSpaces) {
4413     // Using large pages when dumping the shared archive is currently not implemented.
4414     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4415   }
4416 
4417   size_t page_size = os::vm_page_size();
4418   if (UseLargePages && UseLargePagesInMetaspace) {
4419     page_size = os::large_page_size();
4420   }
4421 
4422   _commit_alignment  = page_size;
4423   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
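  // For example (platform values are assumptions): with a 4K page size and a
  // 64K allocation granularity, _commit_alignment would be 4K and
  // _reserve_alignment 64K; with 2M large pages enabled for metaspace, both
  // would be 2M instead.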
4424 
  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
  // overwrite the information about whether MaxMetaspaceSize was set on the
  // command line or not. This information is needed later to conform to the
  // specification of the java.lang.management.MemoryUsage API.
4429   //
4430   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4431   // globals.hpp to the aligned value, but this is not possible, since the
4432   // alignment depends on other flags being parsed.
4433   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4434 
4435   if (MetaspaceSize > MaxMetaspaceSize) {
4436     MetaspaceSize = MaxMetaspaceSize;
4437   }
4438 
4439   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4440 
4441   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4442 
4443   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4444   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4445 
4446   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4447 
4448   // Initial virtual space size will be calculated at global_initialize()
4449   size_t min_metaspace_sz =
4450       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4451   if (UseCompressedClassPointers) {
4452     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4453       if (min_metaspace_sz >= MaxMetaspaceSize) {
4454         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4455       } else {
4456         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4457                       MaxMetaspaceSize - min_metaspace_sz);
4458       }
4459     }
4460   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4461     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4462                   min_metaspace_sz);
4463   }
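  // For instance (flag values are assumptions for illustration): with
  // -XX:MaxMetaspaceSize=256m, InitialBootClassLoaderMetaspaceSize at 4M and
  // CompressedClassSpaceSize at 1G, min_metaspace_sz is 8M; since 8M + 1G
  // exceeds 256M but 8M alone does not, CompressedClassSpaceSize is
  // ergonomically reduced to 256M - 8M = 248M.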
4464 
4465   set_compressed_class_space_size(CompressedClassSpaceSize);
4466 }
4467 
4468 void Metaspace::global_initialize() {
4469   MetaspaceGC::initialize();
4470 
4471 #if INCLUDE_CDS
4472   if (DumpSharedSpaces) {
4473     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4474   } else if (UseSharedSpaces) {
4475     // If any of the archived space fails to map, UseSharedSpaces
4476     // is reset to false. Fall through to the
4477     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4478     // metaspace.
4479     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4480   }
4481 
4482   if (!DumpSharedSpaces && !UseSharedSpaces)
4483 #endif // INCLUDE_CDS
4484   {
4485 #ifdef _LP64
4486     if (using_class_space()) {
4487       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4488       allocate_metaspace_compressed_klass_ptrs(base, 0);
4489     }
4490 #endif // _LP64
4491   }
4492 
4493   // Initialize these before initializing the VirtualSpaceList
4494   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4495   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list.  The next chunk will be small and progress
  // from there.  This size was determined by measuring a "java -version" run.
4499   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4500                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4501   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
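  // As an illustration (chunk and flag values are assumptions): with a medium
  // chunk of 8K words and CompressedClassSpaceSize at 1G, MediumChunk * 6 =
  // 48K words is the smaller of the two terms, so the first class chunk is
  // 48K words (384K bytes on 64-bit), larger than a single medium chunk.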
4502   // Arbitrarily set the initial virtual space to a multiple
4503   // of the boot class loader size.
4504   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4505   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4506 
4507   // Initialize the list of virtual spaces.
4508   _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4510 
4511   if (!_space_list->initialization_succeeded()) {
4512     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4513   }
4514 
4515   _tracer = new MetaspaceTracer();
4516 }
4517 
4518 void Metaspace::post_initialize() {
4519   MetaspaceGC::post_initialize();
4520 }
4521 
4522 void Metaspace::verify_global_initialization() {
4523   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4524   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4525 
4526   if (using_class_space()) {
4527     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4528     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4529   }
4530 }
4531 
4532 size_t Metaspace::align_word_size_up(size_t word_size) {
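  // Illustrative example (alignment is an assumption): with 8-byte words and an
  // allocation alignment of 4K, a request of 100 words (800 bytes) rounds up to
  // 512 words (4096 bytes).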
4533   size_t byte_size = word_size * wordSize;
4534   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4535 }
4536 
4537 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4538                               MetaspaceObj::Type type, TRAPS) {
4539   assert(!_frozen, "sanity");
4540   if (HAS_PENDING_EXCEPTION) {
4541     assert(false, "Should not allocate with exception pending");
4542     return NULL;  // caller does a CHECK_NULL too
4543   }
4544 
4545   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4546         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4547 
4548   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4549 
4550   // Try to allocate metadata.
4551   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4552 
4553   if (result == NULL) {
4554     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4555 
4556     // Allocation failed.
4557     if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
4558       // Only start a GC if the bootstrapping has completed.
4559       // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
4560       // the VM thread.
4561 
4562       // Try to clean out some memory and retry.
4563       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4564     }
4565   }
4566 
4567   if (result == NULL) {
4568     if (DumpSharedSpaces) {
4569       // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
4570       // We should abort to avoid generating a potentially bad archive.
4571       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4572           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4573       tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
4574       vm_exit(1);
4575     }
4576     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4577   }
4578 
4579   // Zero initialize.
4580   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4581 
4582   return result;
4583 }
4584 
4585 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4586   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4587 
4588   // If result is still null, we are out of memory.
4589   Log(gc, metaspace, freelist) log;
4590   if (log.is_info()) {
4591     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4592              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4593     ResourceMark rm;
4594     if (log.is_debug()) {
4595       if (loader_data->metaspace_or_null() != NULL) {
4596         LogStream ls(log.debug());
4597         loader_data->print_value_on(&ls);
4598       }
4599     }
4600     LogStream ls(log.info());
4601     // In case of an OOM, log out a short but still useful report.
4602     MetaspaceUtils::print_basic_report(&ls, 0);
4603   }
4604 
4605   bool out_of_compressed_class_space = false;
4606   if (is_class_space_allocation(mdtype)) {
4607     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4608     out_of_compressed_class_space =
4609       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4610       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4611       CompressedClassSpaceSize;
4612   }
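  // Illustrative example (sizes are made up): with CompressedClassSpaceSize at
  // 1G, 1020M already committed in the class space and a failed request that
  // would need an 8M chunk, 1020M + 8M > 1G, so the failure is reported as
  // "Compressed class space" rather than plain "Metaspace".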
4613 
4614   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4615   const char* space_string = out_of_compressed_class_space ?
4616     "Compressed class space" : "Metaspace";
4617 
4618   report_java_out_of_memory(space_string);
4619 
4620   if (JvmtiExport::should_post_resource_exhausted()) {
4621     JvmtiExport::post_resource_exhausted(
4622         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4623         space_string);
4624   }
4625 
4626   if (!is_init_completed()) {
4627     vm_exit_during_initialization("OutOfMemoryError", space_string);
4628   }
4629 
4630   if (out_of_compressed_class_space) {
4631     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4632   } else {
4633     THROW_OOP(Universe::out_of_memory_error_metaspace());
4634   }
4635 }
4636 
4637 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4638   switch (mdtype) {
4639     case Metaspace::ClassType: return "Class";
4640     case Metaspace::NonClassType: return "Metadata";
4641     default:
4642       assert(false, "Got bad mdtype: %d", (int) mdtype);
4643       return NULL;
4644   }
4645 }
4646 
4647 void Metaspace::purge(MetadataType mdtype) {
4648   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4649 }
4650 
4651 void Metaspace::purge() {
4652   MutexLockerEx cl(MetaspaceExpand_lock,
4653                    Mutex::_no_safepoint_check_flag);
4654   purge(NonClassType);
4655   if (using_class_space()) {
4656     purge(ClassType);
4657   }
4658 }
4659 
4660 bool Metaspace::contains(const void* ptr) {
4661   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4662     return true;
4663   }
4664   return contains_non_shared(ptr);
4665 }
4666 
4667 bool Metaspace::contains_non_shared(const void* ptr) {
4668   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4669      return true;
4670   }
4671 
4672   return get_space_list(NonClassType)->contains(ptr);
4673 }
4674 
4675 // ClassLoaderMetaspace
4676 
4677 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
4678   : _lock(lock)
4679   , _space_type(type)
4680   , _vsm(NULL)
4681   , _class_vsm(NULL)
4682 {
4683   initialize(lock, type);
4684 }
4685 
4686 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4687   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
4688   delete _vsm;
4689   if (Metaspace::using_class_space()) {
4690     delete _class_vsm;
4691   }
4692 }
4693 
4694 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4695   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4696   if (chunk != NULL) {
4697     // Add to this manager's list of chunks in use and make it the current_chunk().
4698     get_space_manager(mdtype)->add_chunk(chunk, true);
4699   }
4700 }
4701 
4702 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4703   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4704 
4705   // Get a chunk from the chunk freelist
4706   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4707 
4708   if (chunk == NULL) {
4709     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4710                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4711   }
4712 
4713   return chunk;
4714 }
4715 
4716 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4717   Metaspace::verify_global_initialization();
4718 
4719   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
4720 
4721   // Allocate SpaceManager for metadata objects.
4722   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4723 
4724   if (Metaspace::using_class_space()) {
4725     // Allocate SpaceManager for classes.
4726     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4727   }
4728 
4729   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4730 
4731   // Allocate chunk for metadata objects
4732   initialize_first_chunk(type, Metaspace::NonClassType);
4733 
4734   // Allocate chunk for class metadata objects
4735   if (Metaspace::using_class_space()) {
4736     initialize_first_chunk(type, Metaspace::ClassType);
4737   }
4738 }
4739 
4740 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4741   Metaspace::assert_not_frozen();
4742 
4743   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
4744 
4745   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4746   if (Metaspace::is_class_space_allocation(mdtype)) {
4747     return  class_vsm()->allocate(word_size);
4748   } else {
4749     return  vsm()->allocate(word_size);
4750   }
4751 }
4752 
4753 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4754   Metaspace::assert_not_frozen();
4755   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4756   assert(delta_bytes > 0, "Must be");
4757 
4758   size_t before = 0;
4759   size_t after = 0;
4760   MetaWord* res;
4761   bool incremented;
4762 
4763   // Each thread increments the HWM at most once. Even if the thread fails to increment
4764   // the HWM, an allocation is still attempted. This is because another thread must then
4765   // have incremented the HWM and therefore the allocation might still succeed.
4766   do {
4767     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4768     res = allocate(word_size, mdtype);
4769   } while (!incremented && res == NULL);
4770 
4771   if (incremented) {
4772     Metaspace::tracer()->report_gc_threshold(before, after,
4773                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4774     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4775   }
4776 
4777   return res;
4778 }
4779 
4780 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4781   return (vsm()->used_words() +
4782       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
4783 }
4784 
4785 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4786   return (vsm()->capacity_words() +
4787       (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
4788 }
4789 
4790 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4791   Metaspace::assert_not_frozen();
4792   assert(!SafepointSynchronize::is_at_safepoint()
4793          || Thread::current()->is_VM_thread(), "should be the VM thread");
4794 
4795   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
4796 
4797   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4798 
4799   if (is_class && Metaspace::using_class_space()) {
4800     class_vsm()->deallocate(ptr, word_size);
4801   } else {
4802     vsm()->deallocate(ptr, word_size);
4803   }
4804 }
4805 
4806 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4807   assert(Metaspace::using_class_space(), "Has to use class space");
4808   return class_vsm()->calc_chunk_size(word_size);
4809 }
4810 
4811 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4812   // Print both class virtual space counts and metaspace.
4813   if (Verbose) {
4814     vsm()->print_on(out);
4815     if (Metaspace::using_class_space()) {
4816       class_vsm()->print_on(out);
4817     }
4818   }
4819 }
4820 
4821 void ClassLoaderMetaspace::verify() {
4822   vsm()->verify();
4823   if (Metaspace::using_class_space()) {
4824     class_vsm()->verify();
4825   }
4826 }
4827 
4828 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
4829   assert_lock_strong(lock());
4830   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
4831   if (Metaspace::using_class_space()) {
4832     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
4833   }
4834 }
4835 
4836 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
4837   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
4838   add_to_statistics_locked(out);
4839 }
4840 
4841 #ifdef ASSERT
4842 static void do_verify_chunk(Metachunk* chunk) {
4843   guarantee(chunk != NULL, "Sanity");
  // Verify the chunk itself; then verify that it is consistent with the
  // occupancy map of its containing node.
4846   chunk->verify();
4847   VirtualSpaceNode* const vsn = chunk->container();
4848   OccupancyMap* const ocmap = vsn->occupancy_map();
4849   ocmap->verify_for_chunk(chunk);
4850 }
4851 #endif
4852 
4853 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4854   chunk->set_is_tagged_free(!inuse);
4855   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4856   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4857 }
4858 
4859 /////////////// Unit tests ///////////////
4860 
4861 #ifndef PRODUCT
4862 
4863 class TestMetaspaceUtilsTest : AllStatic {
4864  public:
4865   static void test_reserved() {
4866     size_t reserved = MetaspaceUtils::reserved_bytes();
4867 
4868     assert(reserved > 0, "assert");
4869 
4870     size_t committed  = MetaspaceUtils::committed_bytes();
4871     assert(committed <= reserved, "assert");
4872 
4873     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
4874     assert(reserved_metadata > 0, "assert");
4875     assert(reserved_metadata <= reserved, "assert");
4876 
4877     if (UseCompressedClassPointers) {
4878       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
4879       assert(reserved_class > 0, "assert");
4880       assert(reserved_class < reserved, "assert");
4881     }
4882   }
4883 
4884   static void test_committed() {
4885     size_t committed = MetaspaceUtils::committed_bytes();
4886 
4887     assert(committed > 0, "assert");
4888 
4889     size_t reserved  = MetaspaceUtils::reserved_bytes();
4890     assert(committed <= reserved, "assert");
4891 
4892     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
4893     assert(committed_metadata > 0, "assert");
4894     assert(committed_metadata <= committed, "assert");
4895 
4896     if (UseCompressedClassPointers) {
4897       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
4898       assert(committed_class > 0, "assert");
4899       assert(committed_class < committed, "assert");
4900     }
4901   }
4902 
4903   static void test_virtual_space_list_large_chunk() {
4904     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
4905     MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k) and add one page to make it
    // _not_ vm_allocation_granularity aligned on Windows.
4908     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
4909     large_size += (os::vm_page_size()/BytesPerWord);
4910     vs_list->get_new_chunk(large_size, 0);
4911   }
4912 
4913   static void test() {
4914     test_reserved();
4915     test_committed();
4916     test_virtual_space_list_large_chunk();
4917   }
4918 };
4919 
4920 void TestMetaspaceUtils_test() {
4921   TestMetaspaceUtilsTest::test();
4922 }
4923 
4924 class TestVirtualSpaceNodeTest {
4925   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
4926                                           size_t& num_small_chunks,
4927                                           size_t& num_specialized_chunks) {
4928     num_medium_chunks = words_left / MediumChunk;
4929     words_left = words_left % MediumChunk;
4930 
4931     num_small_chunks = words_left / SmallChunk;
4932     words_left = words_left % SmallChunk;
4933     // how many specialized chunks can we get?
4934     num_specialized_chunks = words_left / SpecializedChunk;
4935     assert(words_left % SpecializedChunk == 0, "should be nothing left");
4936   }
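  // An illustrative run (chunk word sizes are assumptions for this example):
  // with MediumChunk = 8192, SmallChunk = 512 and SpecializedChunk = 128 words,
  // words_left = 8960 decomposes into 1 medium chunk (768 words left), 1 small
  // chunk (256 left) and 2 specialized chunks (0 left), satisfying the assert.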
4937 
4938  public:
4939   static void test() {
4940     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4941     const size_t vsn_test_size_words = MediumChunk  * 4;
4942     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
4943 
    // The chunk sizes must be multiples of each other, or this will fail.
4945     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
4946     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
4947 
4948     { // No committed memory in VSN
4949       ChunkManager cm(false);
4950       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
4951       vsn.initialize();
4952       vsn.retire(&cm);
4953       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
4954     }
4955 
4956     { // All of VSN is committed, half is used by chunks
4957       ChunkManager cm(false);
4958       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
4959       vsn.initialize();
4960       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
4961       vsn.get_chunk_vs(MediumChunk);
4962       vsn.get_chunk_vs(MediumChunk);
4963       vsn.retire(&cm);
4964       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
4965       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
4966     }
4967 
4968     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
4969     // This doesn't work for systems with vm_page_size >= 16K.
4970     if (page_chunks < MediumChunk) {
4971       // 4 pages of VSN is committed, some is used by chunks
4972       ChunkManager cm(false);
4973       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
4974 
4975       vsn.initialize();
4976       vsn.expand_by(page_chunks, page_chunks);
4977       vsn.get_chunk_vs(SmallChunk);
4978       vsn.get_chunk_vs(SpecializedChunk);
4979       vsn.retire(&cm);
4980 
4981       // committed - used = words left to retire
4982       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
4983 
4984       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
4985       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
4986 
4987       assert(num_medium_chunks == 0, "should not get any medium chunks");
4988       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
4989       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
4990     }
4991 
4992     { // Half of VSN is committed, a humongous chunk is used
4993       ChunkManager cm(false);
4994       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
4995       vsn.initialize();
4996       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
4997       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
4998       vsn.retire(&cm);
4999 
5000       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5001       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5002       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5003 
5004       assert(num_medium_chunks == 0, "should not get any medium chunks");
5005       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5006       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5007     }
5008 
5009   }
5010 
5011 #define assert_is_available_positive(word_size) \
5012   assert(vsn.is_available(word_size), \
5013          #word_size ": " PTR_FORMAT " bytes were not available in " \
5014          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5015          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5016 
5017 #define assert_is_available_negative(word_size) \
5018   assert(!vsn.is_available(word_size), \
5019          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5020          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5021          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5022 
5023   static void test_is_available_positive() {
5024     // Reserve some memory.
5025     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5026     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5027 
5028     // Commit some memory.
5029     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5030     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5031     assert(expanded, "Failed to commit");
5032 
5033     // Check that is_available accepts the committed size.
5034     assert_is_available_positive(commit_word_size);
5035 
5036     // Check that is_available accepts half the committed size.
5037     size_t expand_word_size = commit_word_size / 2;
5038     assert_is_available_positive(expand_word_size);
5039   }
5040 
5041   static void test_is_available_negative() {
5042     // Reserve some memory.
5043     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5044     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5045 
5046     // Commit some memory.
5047     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5048     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5049     assert(expanded, "Failed to commit");
5050 
5051     // Check that is_available doesn't accept a too large size.
5052     size_t two_times_commit_word_size = commit_word_size * 2;
5053     assert_is_available_negative(two_times_commit_word_size);
5054   }
5055 
5056   static void test_is_available_overflow() {
5057     // Reserve some memory.
5058     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5059     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5060 
5061     // Commit some memory.
5062     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5063     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5064     assert(expanded, "Failed to commit");
5065 
5066     // Calculate a size that will overflow the virtual space size.
5067     void* virtual_space_max = (void*)(uintptr_t)-1;
5068     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5069     size_t overflow_size = bottom_to_max + BytesPerWord;
5070     size_t overflow_word_size = overflow_size / BytesPerWord;
5071 
5072     // Check that is_available can handle the overflow.
5073     assert_is_available_negative(overflow_word_size);
5074   }
5075 
5076   static void test_is_available() {
5077     TestVirtualSpaceNodeTest::test_is_available_positive();
5078     TestVirtualSpaceNodeTest::test_is_available_negative();
5079     TestVirtualSpaceNodeTest::test_is_available_overflow();
5080   }
5081 };
5082 
5083 // The following test is placed here instead of a gtest / unittest file
5084 // because the ChunkManager class is only available in this file.
5085 void ChunkManager_test_list_index() {
5086   {
    // Test a previous bug where a query for a humongous class metachunk
    // incorrectly matched the non-class medium metachunk size.
5089     {
5090       ChunkManager manager(true);
5091 
5092       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5093 
5094       ChunkIndex index = manager.list_index(MediumChunk);
5095 
5096       assert(index == HumongousIndex,
5097           "Requested size is larger than ClassMediumChunk,"
5098           " so should return HumongousIndex. Got index: %d", (int)index);
5099     }
5100 
5101     // Check the specified sizes as well.
5102     {
5103       ChunkManager manager(true);
5104       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5105       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5106       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5107       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5108     }
5109     {
5110       ChunkManager manager(false);
5111       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5112       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5113       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5114       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5115     }
5116 
5117   }
5118 
5119 }
5120 
5121 #endif // !PRODUCT
5122 
5123 #ifdef ASSERT
5124 
5125 // The following test is placed here instead of a gtest / unittest file
5126 // because the ChunkManager class is only available in this file.
5127 class SpaceManagerTest : AllStatic {
5128   friend void SpaceManager_test_adjust_initial_chunk_size();
5129 
5130   static void test_adjust_initial_chunk_size(bool is_class) {
5131     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5132     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5133     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5134 
5135 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5136     do {                                                                         \
5137       size_t v = value;                                                          \
5138       size_t e = expected;                                                       \
5139       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
             "For requested size " SIZE_FORMAT " expected chunk size " SIZE_FORMAT ".", v, e);  \
5141     } while (0)
5142 
5143     // Smallest (specialized)
5144     test_adjust_initial_chunk_size(1,            smallest, is_class);
5145     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5146     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5147 
5148     // Small
5149     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5150     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5151     test_adjust_initial_chunk_size(normal,       normal, is_class);
5152 
5153     // Medium
5154     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5155     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5156     test_adjust_initial_chunk_size(medium,     medium, is_class);
5157 
5158     // Humongous
5159     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5160 
5161 #undef test_adjust_initial_chunk_size
5162   }
5163 
5164   static void test_adjust_initial_chunk_size() {
5165     test_adjust_initial_chunk_size(false);
5166     test_adjust_initial_chunk_size(true);
5167   }
5168 };
5169 
5170 void SpaceManager_test_adjust_initial_chunk_size() {
5171   SpaceManagerTest::test_adjust_initial_chunk_size();
5172 }
5173 
5174 #endif // ASSERT
5175 
5176 struct chunkmanager_statistics_t {
5177   int num_specialized_chunks;
5178   int num_small_chunks;
5179   int num_medium_chunks;
5180   int num_humongous_chunks;
5181 };
5182 
5183 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5184   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5185   ChunkManagerStatistics stat;
5186   chunk_manager->collect_statistics(&stat);
5187   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
5188   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
5189   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
5190   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
5191 }
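// Note: like the tests above, this hook exists because the ChunkManager type is
// only visible in this file; it is presumably called from native test code whose
// call sites live outside this file.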
5192 
5193 struct chunk_geometry_t {
5194   size_t specialized_chunk_word_size;
5195   size_t small_chunk_word_size;
5196   size_t medium_chunk_word_size;
5197 };
5198 
5199 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5200   if (mdType == Metaspace::NonClassType) {
5201     out->specialized_chunk_word_size = SpecializedChunk;
5202     out->small_chunk_word_size = SmallChunk;
5203     out->medium_chunk_word_size = MediumChunk;
5204   } else {
5205     out->specialized_chunk_word_size = ClassSpecializedChunk;
5206     out->small_chunk_word_size = ClassSmallChunk;
5207     out->medium_chunk_word_size = ClassMediumChunk;
5208   }
5209 }