src/hotspot/share/memory/metaspace.cpp

rev 50082 : imported patch metaspace-split


   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */

  24 #include "precompiled.hpp"

  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"


  36 #include "memory/metaspace/metaspaceCommon.hpp"
  37 #include "memory/metaspace/metaspaceStatistics.hpp"
  38 #include "memory/metaspaceGCThresholdUpdater.hpp"

  39 #include "memory/metaspaceShared.hpp"
  40 #include "memory/metaspaceTracer.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "memory/universe.hpp"
  43 #include "runtime/atomic.hpp"
  44 #include "runtime/globals.hpp"
  45 #include "runtime/init.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/mutex.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "runtime/orderAccess.inline.hpp"
  50 #include "services/memTracker.hpp"
  51 #include "services/memoryService.hpp"
  52 #include "utilities/align.hpp"
  53 #include "utilities/copy.hpp"
  54 #include "utilities/debug.hpp"
  55 #include "utilities/globalDefinitions.hpp"
  56 #include "utilities/macros.hpp"
  57 
  58 using namespace metaspace::internals;
  59 
  60 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  61 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  62 
  63 // Helper function that does a bunch of checks for a chunk.
  64 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  65 
  66 // Given a Metachunk, update its in-use information (both in the
  67 // chunk and the occupancy map).
  68 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  69 
  70 size_t const allocation_from_dictionary_limit = 4 * K;
  71 
  72 MetaWord* last_allocated = 0;
  73 
  74 size_t Metaspace::_compressed_class_space_size;
  75 const MetaspaceTracer* Metaspace::_tracer = NULL;
  76 
  77 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  78 
  79 // Internal statistics.
  80 #ifdef ASSERT
  81 static struct {
  82   // Number of allocations.
  83   uintx num_allocs;
  84   // Number of times a ClassLoaderMetaspace was born...
  85   uintx num_metaspace_births;
  86   // ... and died.
  87   uintx num_metaspace_deaths;
  88   // Number of times VirtualSpaceListNodes were created...
  89   uintx num_vsnodes_created;
  90   // ... and purged.
  91   uintx num_vsnodes_purged;
  92   // Number of times we expanded the committed section of the space.
  93   uintx num_committed_space_expanded;
  94   // Number of deallocations.
  95   uintx num_deallocs;
  96   // Number of deallocations triggered from outside ("real" deallocations).
  97   uintx num_external_deallocs;
  98   // Number of times an allocation was satisfied from deallocated blocks.
  99   uintx num_allocs_from_deallocated_blocks;
 100 } g_internal_statistics;
 101 #endif
 102 
 103 enum ChunkSizes {    // in words.
 104   ClassSpecializedChunk = 128,
 105   SpecializedChunk = 128,
 106   ClassSmallChunk = 256,
 107   SmallChunk = 512,
 108   ClassMediumChunk = 4 * K,
 109   MediumChunk = 8 * K
 110 };
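     // Illustrative note (assuming 8-byte MetaWords, i.e. a 64-bit VM): these word
     // counts correspond to 1 KB (SpecializedChunk, ClassSpecializedChunk), 2 KB
     // (ClassSmallChunk), 4 KB (SmallChunk), 32 KB (ClassMediumChunk) and 64 KB (MediumChunk).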
 111 
 112 // Returns size of this chunk type.
 113 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
 114   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
 115   size_t size = 0;
 116   if (is_class) {
 117     switch(chunktype) {
 118       case SpecializedIndex: size = ClassSpecializedChunk; break;
 119       case SmallIndex: size = ClassSmallChunk; break;
 120       case MediumIndex: size = ClassMediumChunk; break;
 121       default:
 122         ShouldNotReachHere();
 123     }
 124   } else {
 125     switch(chunktype) {
 126       case SpecializedIndex: size = SpecializedChunk; break;
 127       case SmallIndex: size = SmallChunk; break;
 128       case MediumIndex: size = MediumChunk; break;
 129       default:
 130         ShouldNotReachHere();
 131     }
 132   }
 133   return size;
 134 }
 135 
 136 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 137   if (is_class) {
 138     if (size == ClassSpecializedChunk) {
 139       return SpecializedIndex;
 140     } else if (size == ClassSmallChunk) {
 141       return SmallIndex;
 142     } else if (size == ClassMediumChunk) {
 143       return MediumIndex;
 144     } else if (size > ClassMediumChunk) {
 145       // A valid humongous chunk size is a multiple of the smallest chunk size.
 146       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 147       return HumongousIndex;
 148     }
 149   } else {
 150     if (size == SpecializedChunk) {
 151       return SpecializedIndex;
 152     } else if (size == SmallChunk) {
 153       return SmallIndex;
 154     } else if (size == MediumChunk) {
 155       return MediumIndex;
 156     } else if (size > MediumChunk) {
 157       // A valid humongous chunk size is a multiple of the smallest chunk size.
 158       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 159       return HumongousIndex;
 160     }
 161   }
 162   ShouldNotReachHere();
 163   return (ChunkIndex)-1;
 164 }
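     // Usage sketch (illustrative, non-class case): get_chunk_type_by_size(512, false)
     // yields SmallIndex, while any size larger than MediumChunk - e.g. 16 * K words -
     // is classified as HumongousIndex, provided it is a multiple of SpecializedChunk (128 words).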
 165 
 166 ChunkIndex next_chunk_index(ChunkIndex i) {
 167   assert(i < NumberOfInUseLists, "Out of bound");
 168   return (ChunkIndex) (i+1);
 169 }
 170 
 171 ChunkIndex prev_chunk_index(ChunkIndex i) {
 172   assert(i > ZeroIndex, "Out of bound");
 173   return (ChunkIndex) (i-1);
 174 }
 175 
 176 static const char* space_type_name(Metaspace::MetaspaceType t) {
 177   const char* s = NULL;
 178   switch (t) {
 179     case Metaspace::StandardMetaspaceType: s = "Standard"; break;
 180     case Metaspace::BootMetaspaceType: s = "Boot"; break;
 181     case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
 182     case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
 183     default: ShouldNotReachHere();
 184   }
 185   return s;
 186 }
 187 
 188 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 189 uint MetaspaceGC::_shrink_factor = 0;
 190 bool MetaspaceGC::_should_concurrent_collect = false;
 191 

 192 
 193 typedef class FreeList<Metachunk> ChunkList;
 194 
 195 // Manages the global free lists of chunks.
 196 class ChunkManager : public CHeapObj<mtInternal> {
 197   friend class TestVirtualSpaceNodeTest;
 198 
 199   // Free list of chunks of different sizes.
 200   //   SpecializedChunk
 201   //   SmallChunk
 202   //   MediumChunk
 203   ChunkList _free_chunks[NumberOfFreeLists];
 204 
 205   // Whether or not this is the class ChunkManager.
 206   const bool _is_class;
 207 
 208   // Return non-humongous chunk list by its index.
 209   ChunkList* free_chunks(ChunkIndex index);
 210 
 211   // Returns non-humongous chunk list for the given chunk word size.
 212   ChunkList* find_free_chunks_list(size_t word_size);
 213 
 214   //   HumongousChunk
 215   ChunkTreeDictionary _humongous_dictionary;
 216 
 217   // Returns the humongous chunk dictionary.
 218   ChunkTreeDictionary* humongous_dictionary() {
 219     return &_humongous_dictionary;
 220   }
 221 
 222   // Size, in metaspace words, of all chunks managed by this ChunkManager
 223   size_t _free_chunks_total;
 224   // Number of chunks in this ChunkManager
 225   size_t _free_chunks_count;
 226 
 227   // Update counters after a chunk was added or removed.
 228   void account_for_added_chunk(const Metachunk* c);
 229   void account_for_removed_chunk(const Metachunk* c);
 230 
 231   // Debug support
 232 
 233   size_t sum_free_chunks();
 234   size_t sum_free_chunks_count();
 235 
 236   void locked_verify_free_chunks_total();
 237   void slow_locked_verify_free_chunks_total() {
 238     if (VerifyMetaspace) {
 239       locked_verify_free_chunks_total();
 240     }
 241   }
 242   void locked_verify_free_chunks_count();
 243   void slow_locked_verify_free_chunks_count() {
 244     if (VerifyMetaspace) {
 245       locked_verify_free_chunks_count();
 246     }
 247   }
 248 
 249   // Given a pointer to a chunk, attempts to merge it with neighboring
 250   // free chunks to form a bigger chunk. Returns true if successful.
 251   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 252 
 253   // Helper for chunk merging:
 254   //  Given an address range with 1-n chunks which are all supposed to be
 255   //  free and hence currently managed by this ChunkManager, remove them
 256   //  from this ChunkManager and mark them as invalid.
 257   // - This does not correct the occupancy map.
 258   // - This does not adjust the counters in ChunkManager.
 259   // - Does not adjust container count counter in containing VirtualSpaceNode.
 260   // Returns number of chunks removed.
 261   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 262 
 263   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 264   // split up the larger chunk into n smaller chunks, at least one of which should be
 265   // the target chunk of target chunk size. The smaller chunks, including the target
 266   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 267   // Note that this chunk is supposed to be removed from the freelist right away.
 268   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
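       // Illustrative example (non-class case): splitting a free MediumChunk (8 * K words)
       // to obtain a SmallChunk (512 words) yields the 512-word target plus free chunks
       // covering the remaining 7.5 K words; the exact layout of that remainder is an
       // implementation detail of the splitting code.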
 269 
 270  public:
 271 
 272   ChunkManager(bool is_class)
 273       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 274     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 275     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 276     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 277   }
 278 
 279   // Add or delete (return) a chunk to the global freelist.
 280   Metachunk* chunk_freelist_allocate(size_t word_size);
 281 
 282   // Map a size to a list index assuming that there are lists
 283   // for special, small, medium, and humongous chunks.
 284   ChunkIndex list_index(size_t size);
 285 
 286   // Map a given index to the chunk size.
 287   size_t size_by_index(ChunkIndex index) const;
 288 
 289   bool is_class() const { return _is_class; }
 290 
 291   // Convenience accessors.
 292   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 293   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 294   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 295 
 296   // Take a chunk from the ChunkManager. The chunk is expected to be in
 297   // the chunk manager (the freelist if non-humongous, the dictionary if
 298   // humongous).
 299   void remove_chunk(Metachunk* chunk);
 300 
 301   // Return a single chunk of type index to the ChunkManager.
 302   void return_single_chunk(Metachunk* chunk);
 303 
 304   // Add the simple linked list of chunks to the freelist of chunks
 305   // of type index.
 306   void return_chunk_list(Metachunk* chunks);
 307 
 308   // Total of the space in the free chunks list
 309   size_t free_chunks_total_words();
 310   size_t free_chunks_total_bytes();
 311 
 312   // Number of chunks in the free chunks list
 313   size_t free_chunks_count();
 314 
 315   // Remove from a list by size.  Selects list based on size of chunk.
 316   Metachunk* free_chunks_get(size_t chunk_word_size);
 317 
 318 #define index_bounds_check(index)                                         \
 319   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 320 
 321   size_t num_free_chunks(ChunkIndex index) const {
 322     index_bounds_check(index);
 323 
 324     if (index == HumongousIndex) {
 325       return _humongous_dictionary.total_free_blocks();
 326     }
 327 
 328     ssize_t count = _free_chunks[index].count();
 329     return count == -1 ? 0 : (size_t) count;
 330   }
 331 
 332   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 333     index_bounds_check(index);
 334 
 335     size_t word_size = 0;
 336     if (index == HumongousIndex) {
 337       word_size = _humongous_dictionary.total_size();
 338     } else {
 339       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 340       word_size = size_per_chunk_in_words * num_free_chunks(index);
 341     }
 342 
 343     return word_size * BytesPerWord;
 344   }
 345 
 346   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 347     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 348                                          num_free_chunks(SmallIndex),
 349                                          num_free_chunks(MediumIndex),
 350                                          num_free_chunks(HumongousIndex),
 351                                          size_free_chunks_in_bytes(SpecializedIndex),
 352                                          size_free_chunks_in_bytes(SmallIndex),
 353                                          size_free_chunks_in_bytes(MediumIndex),
 354                                          size_free_chunks_in_bytes(HumongousIndex));
 355   }
 356 
 357   // Debug support
 358   void verify();
 359   void slow_verify() {
 360     if (VerifyMetaspace) {
 361       verify();
 362     }
 363   }
 364   void locked_verify();
 365   void slow_locked_verify() {
 366     if (VerifyMetaspace) {
 367       locked_verify();
 368     }
 369   }
 370 
 371   void locked_print_free_chunks(outputStream* st);
 372   void locked_print_sum_free_chunks(outputStream* st);
 373 
 374   // Fill in current statistic values to the given statistics object.
 375   void collect_statistics(ChunkManagerStatistics* out) const;
 376 
 377 };
 378 
 379 class SmallBlocks : public CHeapObj<mtClass> {
 380   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 381   // Note: this corresponds to the imposed minimum allocation size, see SpaceManager::get_allocation_word_size()
 382   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 383 
 384  private:
 385   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 386 
 387   FreeList<Metablock>& list_at(size_t word_size) {
 388     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 389     return _small_lists[word_size - _small_block_min_size];
 390   }
 391 
 392  public:
 393   SmallBlocks() {
 394     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 395       uint k = i - _small_block_min_size;
 396       _small_lists[k].set_size(i);
 397     }
 398   }
 399 
 400   // Returns the total size, in words, of all blocks, across all block sizes.
 401   size_t total_size() const {
 402     size_t result = 0;
 403     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 404       uint k = i - _small_block_min_size;
 405       result = result + _small_lists[k].count() * _small_lists[k].size();
 406     }
 407     return result;
 408   }
 409 
 410   // Returns the total number of all blocks across all block sizes.
 411   uintx total_num_blocks() const {
 412     uintx result = 0;
 413     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 414       uint k = i - _small_block_min_size;
 415       result = result + _small_lists[k].count();
 416     }
 417     return result;
 418   }
 419 
 420   static uint small_block_max_size() { return _small_block_max_size; }
 421   static uint small_block_min_size() { return _small_block_min_size; }
 422 
 423   MetaWord* get_block(size_t word_size) {
 424     if (list_at(word_size).count() > 0) {
 425       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 426       return new_block;
 427     } else {
 428       return NULL;
 429     }
 430   }
 431   void return_block(Metablock* free_chunk, size_t word_size) {
 432     list_at(word_size).return_chunk_at_head(free_chunk, false);
 433     assert(list_at(word_size).count() > 0, "Should have a chunk");
 434   }
 435 
 436   void print_on(outputStream* st) const {
 437     st->print_cr("SmallBlocks:");
 438     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 439       uint k = i - _small_block_min_size;
 440       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 441     }
 442   }
 443 };
 444 
 445 // Used to manage the free list of Metablocks (a block corresponds
 446 // to the allocation of a quantum of metadata).
 447 class BlockFreelist : public CHeapObj<mtClass> {
 448   BlockTreeDictionary* const _dictionary;
 449   SmallBlocks* _small_blocks;
 450 
 451   // Only allocate and split from freelist if the size of the allocation
 452   // is at least 1/4th the size of the available block.
 453   const static int WasteMultiplier = 4;
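       // Illustrative example: with WasteMultiplier == 4, an 8-word request is not carved
       // out of a free 40-word block (8 * 4 < 40); only free blocks of at most 32 words
       // would be considered for that request.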
 454 
 455   // Accessors
 456   BlockTreeDictionary* dictionary() const { return _dictionary; }
 457   SmallBlocks* small_blocks() {
 458     if (_small_blocks == NULL) {
 459       _small_blocks = new SmallBlocks();
 460     }
 461     return _small_blocks;
 462   }
 463 
 464  public:
 465   BlockFreelist();
 466   ~BlockFreelist();
 467 
 468   // Get and return a block to the free list
 469   MetaWord* get_block(size_t word_size);
 470   void return_block(MetaWord* p, size_t word_size);
 471 
 472   // Returns the total size, in words, of all blocks kept in this structure.
 473   size_t total_size() const  {
 474     size_t result = dictionary()->total_size();
 475     if (_small_blocks != NULL) {
 476       result = result + _small_blocks->total_size();
 477     }
 478     return result;
 479   }
 480 
 481   // Returns the number of all blocks kept in this structure.
 482   uintx num_blocks() const {
 483     uintx result = dictionary()->total_free_blocks();
 484     if (_small_blocks != NULL) {
 485       result = result + _small_blocks->total_num_blocks();
 486     }
 487     return result;
 488   }
 489 
 490   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 491   void print_on(outputStream* st) const;
 492 };
 493 
 494 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one unsigned constant.
 495 template <typename T> struct all_ones  { static const T value; };
 496 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 497 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 498 
 499 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 500 // keeps information about
 501 // - where a chunk starts
 502 // - whether a chunk is in-use or free
 503 // A bit in this bitmap represents one range of memory in the smallest
 504 // chunk size (SpecializedChunk or ClassSpecializedChunk).
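     // Sizing sketch (illustrative): for a node covering 64 * K words with a smallest
     // chunk size of 128 words, each bit layer needs 64 * K / 128 = 512 bits, i.e.
     // 64 bytes per layer and 128 bytes for both layers together.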
 505 class OccupancyMap : public CHeapObj<mtInternal> {
 506 
 507   // The address range this map covers.
 508   const MetaWord* const _reference_address;
 509   const size_t _word_size;
 510 
 511   // The word size of a specialized chunk, aka the number of words one
 512   // bit in this map represents.
 513   const size_t _smallest_chunk_word_size;
 514 
 515   // map data
 516   // Data are organized in two bit layers:
 517   // The first layer is the chunk-start-map. Here, a bit is set to mark
 518   // the corresponding region as the head of a chunk.
 519   // The second layer is the in-use-map. Here, a set bit indicates that
 520   // the corresponding region belongs to a chunk which is in use.
 521   uint8_t* _map[2];
 522 
 523   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 524 
 525   // length, in bytes, of bitmap data
 526   size_t _map_size;
 527 
 528   // Returns true if bit at position pos at bit-layer layer is set.
 529   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 530     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 531     const unsigned byteoffset = pos / 8;
 532     assert(byteoffset < _map_size,
 533            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 534     const unsigned mask = 1 << (pos % 8);
 535     return (_map[layer][byteoffset] & mask) > 0;
 536   }
 537 
 538   // Changes bit at position pos at bit-layer layer to value v.
 539   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 540     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 541     const unsigned byteoffset = pos / 8;
 542     assert(byteoffset < _map_size,
 543            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 544     const unsigned mask = 1 << (pos % 8);
 545     if (v) {
 546       _map[layer][byteoffset] |= mask;
 547     } else {
 548       _map[layer][byteoffset] &= ~mask;
 549     }
 550   }
 551 
 552   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 553   // pos is 32/64 aligned and num_bits is 32/64.
 554   // This is the typical case when coalescing to medium chunks, whose size is
 555   // 32 or 64 times the specialized chunk size (depending on class or non class
 556   // case), so they occupy 64 bits which should be 64bit aligned, because
 557   // chunks are chunk-size aligned.
 558   template <typename T>
 559   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 560     assert(_map_size > 0, "not initialized");
 561     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 562     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 563     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 564     const size_t byteoffset = pos / 8;
 565     assert(byteoffset <= (_map_size - sizeof(T)),
 566            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 567     const T w = *(T*)(_map[layer] + byteoffset);
 568     return w > 0 ? true : false;
 569   }
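       // Illustrative check of the note above: MediumChunk / SpecializedChunk == 8 * K / 128 == 64
       // bits (non-class case), ClassMediumChunk / ClassSpecializedChunk == 4 * K / 128 == 32 bits
       // (class case).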
 570 
 571   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 572   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 573     if (pos % 32 == 0 && num_bits == 32) {
 574       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 575     } else if (pos % 64 == 0 && num_bits == 64) {
 576       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 577     } else {
 578       for (unsigned n = 0; n < num_bits; n ++) {
 579         if (get_bit_at_position(pos + n, layer)) {
 580           return true;
 581         }
 582       }
 583     }
 584     return false;
 585   }
 586 
 587   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 588   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 589     assert(word_size % _smallest_chunk_word_size == 0,
 590         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 591     const unsigned pos = get_bitpos_for_address(p);
 592     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 593     return is_any_bit_set_in_region(pos, num_bits, layer);
 594   }
 595 
 596   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 597   // pos is 32/64 aligned and num_bits is 32/64.
 598   // This is the typical case when coalescing to medium chunks, whose size
 599   // is 32 or 64 times the specialized chunk size (depending on class or non
 600   // class case), so they occupy 64 bits which should be 64bit aligned,
 601   // because chunks are chunk-size aligned.
 602   template <typename T>
 603   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 604     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 605            (unsigned)(sizeof(T) * 8), pos);
 606     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 607            num_bits, (unsigned)(sizeof(T) * 8));
 608     const size_t byteoffset = pos / 8;
 609     assert(byteoffset <= (_map_size - sizeof(T)),
 610            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 611     T* const pw = (T*)(_map[layer] + byteoffset);
 612     *pw = v ? all_ones<T>::value : (T) 0;
 613   }
 614 
 615   // Set all bits in a region starting at pos to a value.
 616   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 617     assert(_map_size > 0, "not initialized");
 618     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 619     if (pos % 32 == 0 && num_bits == 32) {
 620       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 621     } else if (pos % 64 == 0 && num_bits == 64) {
 622       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 623     } else {
 624       for (unsigned n = 0; n < num_bits; n ++) {
 625         set_bit_at_position(pos + n, layer, v);
 626       }
 627     }
 628   }
 629 
 630   // Helper: sets all bits in a region [p, p+word_size).
 631   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 632     assert(word_size % _smallest_chunk_word_size == 0,
 633         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 634     const unsigned pos = get_bitpos_for_address(p);
 635     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 636     set_bits_of_region(pos, num_bits, layer, v);
 637   }
 638 
 639   // Helper: given an address, return the bit position representing that address.
 640   unsigned get_bitpos_for_address(const MetaWord* p) const {
 641     assert(_reference_address != NULL, "not initialized");
 642     assert(p >= _reference_address && p < _reference_address + _word_size,
 643            "Address %p out of range for occupancy map [%p..%p).",
 644             p, _reference_address, _reference_address + _word_size);
 645     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 646            "Address not aligned (%p).", p);
 647     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 648     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 649     return (unsigned) d;
 650   }
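       // Illustrative example: with _reference_address == R and a smallest chunk size of
       // 128 words, the address R + 256 (in words) maps to bit position 2 in either layer.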
 651 
 652  public:
 653 
 654   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 655     _reference_address(reference_address), _word_size(word_size),
 656     _smallest_chunk_word_size(smallest_chunk_word_size) {
 657     assert(reference_address != NULL, "invalid reference address");
 658     assert(is_aligned(reference_address, smallest_chunk_word_size),
 659            "Reference address not aligned to smallest chunk size.");
 660     assert(is_aligned(word_size, smallest_chunk_word_size),
 661            "Word_size shall be a multiple of the smallest chunk size.");
 662     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 663     size_t num_bits = word_size / smallest_chunk_word_size;
 664     _map_size = (num_bits + 7) / 8;
 665     assert(_map_size * 8 >= num_bits, "sanity");
 666     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 667     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 668     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 669     memset(_map[1], 0, _map_size);
 670     memset(_map[0], 0, _map_size);
 671     // Sanity test: the first and last possible chunk start addresses in
 672     // the covered range shall map to the first and last bits in the bitmap.
 673     assert(get_bitpos_for_address(reference_address) == 0,
 674       "First chunk address in range must map to first bit in bitmap.");
 675     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 676       "Last chunk address in range must map to last bit in bitmap.");
 677   }
 678 
 679   ~OccupancyMap() {
 680     os::free(_map[0]);
 681     os::free(_map[1]);
 682   }
 683 
 684   // Returns true if a chunk starts at address p.
 685   bool chunk_starts_at_address(MetaWord* p) const {
 686     const unsigned pos = get_bitpos_for_address(p);
 687     return get_bit_at_position(pos, layer_chunk_start_map);
 688   }
 689 
 690   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 691     const unsigned pos = get_bitpos_for_address(p);
 692     set_bit_at_position(pos, layer_chunk_start_map, v);
 693   }
 694 
 695   // Removes all chunk-start-bits inside a region, typically as a
 696   // result of a chunk merge.
 697   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 698     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 699   }
 700 
 701   // Returns true if there are live (in use) chunks in the region limited
 702   // by [p, p+word_size).
 703   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 704     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 705   }
 706 
 707   // Marks the region starting at p with the size word_size as in use
 708   // or free, depending on v.
 709   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 710     set_bits_of_region(p, word_size, layer_in_use_map, v);
 711   }
 712 
 713 #ifdef ASSERT
 714   // Verify occupancy map for the address range [from, to).
 715   // We need to tell it the address range, because the memory the
 716   // occupancy map is covering may not be fully committed yet.
 717   void verify(MetaWord* from, MetaWord* to) {
 718     Metachunk* chunk = NULL;
 719     int nth_bit_for_chunk = 0;
 720     MetaWord* chunk_end = NULL;
 721     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 722       const unsigned pos = get_bitpos_for_address(p);
 723       // Check the chunk-starts-info:
 724       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 725         // Chunk start marked in bitmap.
 726         chunk = (Metachunk*) p;
 727         if (chunk_end != NULL) {
 728           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 729                  "the next chunk to start at %p).", p, chunk_end);
 730         }
 731         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 732         if (chunk->get_chunk_type() != HumongousIndex) {
 733           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 734         }
 735         chunk_end = p + chunk->word_size();
 736         nth_bit_for_chunk = 0;
 737         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 738       } else {
 739         // No chunk start marked in bitmap.
 740         assert(chunk != NULL, "Chunk should start at start of address range.");
 741         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 742         nth_bit_for_chunk ++;
 743       }
 744       // Check the in-use-info:
 745       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 746       if (in_use_bit) {
 747         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 748                chunk, nth_bit_for_chunk);
 749       } else {
 750         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 751                chunk, nth_bit_for_chunk);
 752       }
 753     }
 754   }
 755 
 756   // Verify that a given chunk is correctly accounted for in the bitmap.
 757   void verify_for_chunk(Metachunk* chunk) {
 758     assert(chunk_starts_at_address((MetaWord*) chunk),
 759            "No chunk start marked in map for chunk %p.", chunk);
 760     // For chunks larger than the minimal chunk size, no other chunk
 761     // must start in its area.
 762     if (chunk->word_size() > _smallest_chunk_word_size) {
 763       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 764                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 765              "No chunk must start within another chunk.");
 766     }
 767     if (!chunk->is_tagged_free()) {
 768       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 769              "Chunk %p is in use but marked as free in map (%d %d).",
 770              chunk, chunk->get_chunk_type(), chunk->get_origin());
 771     } else {
 772       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 773              "Chunk %p is free but marked as in-use in map (%d %d).",
 774              chunk, chunk->get_chunk_type(), chunk->get_origin());
 775     }
 776   }
 777 
 778 #endif // ASSERT
 779 
 780 };
 781 
 782 // A VirtualSpaceList node.
 783 class VirtualSpaceNode : public CHeapObj<mtClass> {
 784   friend class VirtualSpaceList;
 785 
 786   // Link to next VirtualSpaceNode
 787   VirtualSpaceNode* _next;
 788 
 789   // Whether this node belongs to class metaspace or to non-class metaspace.
 790   const bool _is_class;
 791 
 792   // total in the VirtualSpace
 793   MemRegion _reserved;
 794   ReservedSpace _rs;
 795   VirtualSpace _virtual_space;
 796   MetaWord* _top;
 797   // count of chunks contained in this VirtualSpace
 798   uintx _container_count;
 799 
 800   OccupancyMap* _occupancy_map;
 801 
 802   // Convenience functions to access the _virtual_space
 803   char* low()  const { return virtual_space()->low(); }
 804   char* high() const { return virtual_space()->high(); }
 805 
 806   // The first Metachunk will be allocated at the bottom of the
 807   // VirtualSpace
 808   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 809 
 810   // Committed but unused space in the virtual space
 811   size_t free_words_in_vs() const;
 812 
 813   // True if this node belongs to class metaspace.
 814   bool is_class() const { return _is_class; }
 815 
 816   // Helper function for take_from_committed: allocate padding chunks
 817   // until top is at the given address.
 818   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 819 
 820  public:
 821 
 822   VirtualSpaceNode(bool is_class, size_t byte_size);
 823   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 824     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 825   ~VirtualSpaceNode();
 826 
 827   // Convenience functions for logical bottom and end
 828   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 829   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 830 
 831   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 832   OccupancyMap* occupancy_map() { return _occupancy_map; }
 833 
 834   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 835 
 836   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 837   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 838 
 839   bool is_pre_committed() const { return _virtual_space.special(); }
 840 
 841   // address of next available space in _virtual_space;
 842   // Accessors
 843   VirtualSpaceNode* next() { return _next; }
 844   void set_next(VirtualSpaceNode* v) { _next = v; }
 845 
 846   void set_reserved(MemRegion const v) { _reserved = v; }
 847   void set_top(MetaWord* v) { _top = v; }
 848 
 849   // Accessors
 850   MemRegion* reserved() { return &_reserved; }
 851   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 852 
 853   // Returns true if "word_size" is available in the VirtualSpace
 854   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 855 
 856   MetaWord* top() const { return _top; }
 857   void inc_top(size_t word_size) { _top += word_size; }
 858 
 859   uintx container_count() { return _container_count; }
 860   void inc_container_count();
 861   void dec_container_count();
 862 #ifdef ASSERT
 863   uintx container_count_slow();
 864   void verify_container_count();
 865 #endif
 866 
 867   // used and capacity in this single entry in the list
 868   size_t used_words_in_vs() const;
 869   size_t capacity_words_in_vs() const;
 870 
 871   bool initialize();
 872 
 873   // get space from the virtual space
 874   Metachunk* take_from_committed(size_t chunk_word_size);
 875 
 876   // Allocate a chunk from the virtual space and return it.
 877   Metachunk* get_chunk_vs(size_t chunk_word_size);
 878 
 879   // Expands/shrinks the committed space in a virtual space.  Delegates
 880   // to Virtualspace
 881   bool expand_by(size_t min_words, size_t preferred_words);
 882 
 883   // In preparation for deleting this node, remove all the chunks
 884   // in the node from any freelist.
 885   void purge(ChunkManager* chunk_manager);
 886 
 887   // If an allocation doesn't fit in the current node a new node is created.
 888   // Allocate chunks out of the remaining committed space in this node
 889   // to avoid wasting that memory.
 890   // This always adds up because all the chunk sizes are multiples of
 891   // the smallest chunk size.
 892   void retire(ChunkManager* chunk_manager);
 893 
 894 
 895   void print_on(outputStream* st) const                 { print_on(st, K); }
 896   void print_on(outputStream* st, size_t scale) const;
 897   void print_map(outputStream* st, bool is_class) const;
 898 
 899   // Debug support
 900   DEBUG_ONLY(void mangle();)
 901   // Verify counters, all chunks in this list node and the occupancy map.
 902   DEBUG_ONLY(void verify();)
 903   // Verify that all free chunks in this node are ideally merged
 904   // (there should not be multiple small chunks where a large chunk could exist.)
 905   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 906 
 907 };
 908 
 909 #define assert_is_aligned(value, alignment)                  \
 910   assert(is_aligned((value), (alignment)),                   \
 911          SIZE_FORMAT_HEX " is not aligned to "               \
 912          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 913 
 914 #define assert_counter(expected_value, real_value, msg) \
 915   assert( (expected_value) == (real_value),             \
 916          "Counter mismatch (%s): expected " SIZE_FORMAT \
 917          ", but got: " SIZE_FORMAT ".", msg, expected_value, \
 918          real_value);
 919 
 920 // Decide if large pages should be committed when the memory is reserved.
 921 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 922   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 923     size_t words = bytes / BytesPerWord;
 924     bool is_class = false; // We never reserve large pages for the class space.
 925     if (MetaspaceGC::can_expand(words, is_class) &&
 926         MetaspaceGC::allowed_expansion() >= words) {
 927       return true;
 928     }
 929   }
 930 
 931   return false;
 932 }
 933 
 934 // 'bytes' is the size of the associated virtual space.
 935 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 936   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 937   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 938   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 939   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 940 
 941   if (_rs.is_reserved()) {
 942     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 943     assert(_rs.size() != 0, "Catch if we get a 0 size");
 944     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 945     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 946 
 947     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 948   }
 949 }
 950 
 951 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 952   DEBUG_ONLY(this->verify();)
 953   Metachunk* chunk = first_chunk();
 954   Metachunk* invalid_chunk = (Metachunk*) top();
 955   while (chunk < invalid_chunk ) {
 956     assert(chunk->is_tagged_free(), "Should be tagged free");
 957     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 958     chunk_manager->remove_chunk(chunk);
 959     chunk->remove_sentinel();
 960     assert(chunk->next() == NULL &&
 961            chunk->prev() == NULL,
 962            "Was not removed from its list");
 963     chunk = (Metachunk*) next;
 964   }
 965 }
 966 
 967 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 968 
 969   if (bottom() == top()) {
 970     return;
 971   }
 972 
 973   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 974   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 975   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 976 
 977   int line_len = 100;
 978   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 979   line_len = (int)(section_len / spec_chunk_size);
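       // Worked example (illustrative, non-class case): with spec_chunk_size == 128 and
       // med_chunk_size == 8 * K, section_len == align_up(12800, 8192) == 16384, so
       // line_len becomes 16384 / 128 == 128 positions per printed line.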
 980 
 981   static const int NUM_LINES = 4;
 982 
 983   char* lines[NUM_LINES];
 984   for (int i = 0; i < NUM_LINES; i ++) {
 985     lines[i] = (char*)os::malloc(line_len, mtInternal);
 986   }
 987   int pos = 0;
 988   const MetaWord* p = bottom();
 989   const Metachunk* chunk = (const Metachunk*)p;
 990   const MetaWord* chunk_end = p + chunk->word_size();
 991   while (p < top()) {
 992     if (pos == line_len) {
 993       pos = 0;
 994       for (int i = 0; i < NUM_LINES; i ++) {
 995         st->fill_to(22);
 996         st->print_raw(lines[i], line_len);
 997         st->cr();
 998       }
 999     }
1000     if (pos == 0) {
1001       st->print(PTR_FORMAT ":", p2i(p));
1002     }
1003     if (p == chunk_end) {
1004       chunk = (Metachunk*)p;
1005       chunk_end = p + chunk->word_size();
1006     }
1007     // Line 1: chunk starting points (a dot if that area is a chunk start).
1008     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
1009 
1010     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
1011     // chunk is in use.
1012     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
1013     if (chunk->word_size() == spec_chunk_size) {
1014       lines[1][pos] = chunk_is_free ? 'x' : 'X';
1015     } else if (chunk->word_size() == small_chunk_size) {
1016       lines[1][pos] = chunk_is_free ? 's' : 'S';
1017     } else if (chunk->word_size() == med_chunk_size) {
1018       lines[1][pos] = chunk_is_free ? 'm' : 'M';
1019     } else if (chunk->word_size() > med_chunk_size) {
1020       lines[1][pos] = chunk_is_free ? 'h' : 'H';
1021     } else {
1022       ShouldNotReachHere();
1023     }
1024 
1025     // Line 3: chunk origin
1026     const ChunkOrigin origin = chunk->get_origin();
1027     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
1028 
1029     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
1030     //         but were never used.
1031     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
1032 
1033     p += spec_chunk_size;
1034     pos ++;
1035   }
1036   if (pos > 0) {
1037     for (int i = 0; i < NUM_LINES; i ++) {
1038       st->fill_to(22);
1039       st->print_raw(lines[i], line_len);
1040       st->cr();
1041     }
1042   }
1043   for (int i = 0; i < NUM_LINES; i ++) {
1044     os::free(lines[i]);
1045   }
1046 }
1047 
1048 
1049 #ifdef ASSERT
1050 uintx VirtualSpaceNode::container_count_slow() {
1051   uintx count = 0;
1052   Metachunk* chunk = first_chunk();
1053   Metachunk* invalid_chunk = (Metachunk*) top();
1054   while (chunk < invalid_chunk ) {
1055     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1056     do_verify_chunk(chunk);
1057     // Don't count the chunks on the free lists.  Those are
1058     // still part of the VirtualSpaceNode but not currently
1059     // counted.
1060     if (!chunk->is_tagged_free()) {
1061       count++;
1062     }
1063     chunk = (Metachunk*) next;
1064   }
1065   return count;
1066 }
1067 #endif
1068 
1069 #ifdef ASSERT
1070 // Verify counters, all chunks in this list node and the occupancy map.
1071 void VirtualSpaceNode::verify() {
1072   uintx num_in_use_chunks = 0;
1073   Metachunk* chunk = first_chunk();
1074   Metachunk* invalid_chunk = (Metachunk*) top();
1075 
1076   // Iterate the chunks in this node and verify each chunk.
1077   while (chunk < invalid_chunk ) {
1078     DEBUG_ONLY(do_verify_chunk(chunk);)
1079     if (!chunk->is_tagged_free()) {
1080       num_in_use_chunks ++;
1081     }
1082     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1083     chunk = (Metachunk*) next;
1084   }
1085   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1086          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1087   // Also verify the occupancy map.
1088   occupancy_map()->verify(this->bottom(), this->top());
1089 }
1090 #endif // ASSERT
1091 
1092 #ifdef ASSERT
1093 // Verify that all free chunks in this node are ideally merged
1094 // (there should not be multiple small chunks where a large chunk could exist.)
1095 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1096   Metachunk* chunk = first_chunk();
1097   Metachunk* invalid_chunk = (Metachunk*) top();
1098   // Shorthands.
1099   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1100   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1101   int num_free_chunks_since_last_med_boundary = -1;
1102   int num_free_chunks_since_last_small_boundary = -1;
1103   while (chunk < invalid_chunk ) {
1104     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1105     // Reset the counter when encountering a non-free chunk.
1106     if (chunk->get_chunk_type() != HumongousIndex) {
1107       if (chunk->is_tagged_free()) {
1108         // Count successive free, non-humongous chunks.
1109         if (is_aligned(chunk, size_small)) {
1110           assert(num_free_chunks_since_last_small_boundary <= 1,
1111                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1112           num_free_chunks_since_last_small_boundary = 0;
1113         } else if (num_free_chunks_since_last_small_boundary != -1) {
1114           num_free_chunks_since_last_small_boundary ++;
1115         }
1116         if (is_aligned(chunk, size_med)) {
1117           assert(num_free_chunks_since_last_med_boundary <= 1,
1118                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1119           num_free_chunks_since_last_med_boundary = 0;
1120         } else if (num_free_chunks_since_last_med_boundary != -1) {
1121           num_free_chunks_since_last_med_boundary ++;
1122         }
1123       } else {
1124         // Encountering a non-free chunk, reset counters.
1125         num_free_chunks_since_last_med_boundary = -1;
1126         num_free_chunks_since_last_small_boundary = -1;
1127       }
1128     } else {
1129       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1130       num_free_chunks_since_last_med_boundary = -1;
1131       num_free_chunks_since_last_small_boundary = -1;
1132     }
1133 
1134     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1135     chunk = (Metachunk*) next;
1136   }
1137 }
1138 #endif // ASSERT
1139 
1140 // List of VirtualSpaces for metadata allocation.
1141 class VirtualSpaceList : public CHeapObj<mtClass> {
1142   friend class VirtualSpaceNode;
1143 
1144   enum VirtualSpaceSizes {
1145     VirtualSpaceSize = 256 * K
1146   };
1147 
1148   // Head of the list
1149   VirtualSpaceNode* _virtual_space_list;
1150   // virtual space currently being used for allocations
1151   VirtualSpaceNode* _current_virtual_space;
1152 
1153   // Is this VirtualSpaceList used for the compressed class space
1154   bool _is_class;
1155 
1156   // Sum of reserved and committed memory in the virtual spaces
1157   size_t _reserved_words;
1158   size_t _committed_words;
1159 
1160   // Number of virtual spaces
1161   size_t _virtual_space_count;
1162 
1163   ~VirtualSpaceList();
1164 
1165   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1166 
1167   void set_virtual_space_list(VirtualSpaceNode* v) {
1168     _virtual_space_list = v;
1169   }
1170   void set_current_virtual_space(VirtualSpaceNode* v) {
1171     _current_virtual_space = v;
1172   }
1173 
1174   void link_vs(VirtualSpaceNode* new_entry);
1175 
1176   // Get another virtual space and add it to the list.  This
1177   // is typically prompted by a failed attempt to allocate a chunk
1178   // and is typically followed by the allocation of a chunk.
1179   bool create_new_virtual_space(size_t vs_word_size);
1180 
1181   // Chunk up the unused committed space in the current
1182   // virtual space and add the chunks to the free list.
1183   void retire_current_virtual_space();
1184 
1185  public:
1186   VirtualSpaceList(size_t word_size);
1187   VirtualSpaceList(ReservedSpace rs);
1188 
1189   size_t free_bytes();
1190 
1191   Metachunk* get_new_chunk(size_t chunk_word_size,
1192                            size_t suggested_commit_granularity);
1193 
1194   bool expand_node_by(VirtualSpaceNode* node,
1195                       size_t min_words,
1196                       size_t preferred_words);
1197 
1198   bool expand_by(size_t min_words,
1199                  size_t preferred_words);
1200 
1201   VirtualSpaceNode* current_virtual_space() {
1202     return _current_virtual_space;
1203   }
1204 
1205   bool is_class() const { return _is_class; }
1206 
1207   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1208 
1209   size_t reserved_words()  { return _reserved_words; }
1210   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1211   size_t committed_words() { return _committed_words; }
1212   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1213 
1214   void inc_reserved_words(size_t v);
1215   void dec_reserved_words(size_t v);
1216   void inc_committed_words(size_t v);
1217   void dec_committed_words(size_t v);
1218   void inc_virtual_space_count();
1219   void dec_virtual_space_count();
1220 
1221   bool contains(const void* ptr);
1222 
1223   // Unlink empty VirtualSpaceNodes and free them.
1224   void purge(ChunkManager* chunk_manager);
1225 
1226   void print_on(outputStream* st) const                 { print_on(st, K); }
1227   void print_on(outputStream* st, size_t scale) const;
1228   void print_map(outputStream* st) const;
1229 
1230   class VirtualSpaceListIterator : public StackObj {
1231     VirtualSpaceNode* _virtual_spaces;
1232    public:
1233     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1234       _virtual_spaces(virtual_spaces) {}
1235 
1236     bool repeat() {
1237       return _virtual_spaces != NULL;
1238     }
1239 
1240     VirtualSpaceNode* get_next() {
1241       VirtualSpaceNode* result = _virtual_spaces;
1242       if (_virtual_spaces != NULL) {
1243         _virtual_spaces = _virtual_spaces->next();
1244       }
1245       return result;
1246     }
1247   };
1248 };
1249 
1250 class Metadebug : AllStatic {
1251   // Debugging support for Metaspaces
1252   static int _allocation_fail_alot_count;
1253 
1254  public:
1255 
1256   static void init_allocation_fail_alot_count();
1257 #ifdef ASSERT
1258   static bool test_metadata_failure();
1259 #endif
1260 };
1261 
1262 int Metadebug::_allocation_fail_alot_count = 0;
1263 
1264 
1265 //  SpaceManager - used by Metaspace to handle allocations
1266 class SpaceManager : public CHeapObj<mtClass> {
1267   friend class ClassLoaderMetaspace;
1268   friend class Metadebug;
1269 
1270  private:
1271 
1272   // protects allocations
1273   Mutex* const _lock;
1274 
1275   // Type of metadata allocated.
1276   const Metaspace::MetadataType   _mdtype;
1277 
1278   // Type of metaspace
1279   const Metaspace::MetaspaceType  _space_type;
1280 
1281   // List of chunks in use by this SpaceManager.  Allocations
1282   // are done from the current chunk.  The list is used for deallocating
1283   // chunks when the SpaceManager is freed.
1284   Metachunk* _chunk_list;
1285   Metachunk* _current_chunk;
1286 
1287   // Maximum number of small chunks to allocate to a SpaceManager
1288   static uint const _small_chunk_limit;
1289 
1290   // Maximum number of specialized chunks to allocate for anonymous and delegating
1291   // metadata space to a SpaceManager
1292   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1293 
1294   // Some running counters, but let's keep their number small so as not to add too much to
1295   // the per-classloader footprint.
1296   // Note: capacity = used + free + waste + overhead. We do not keep running counters for
1297   // free and waste. Their sum can be deduced from the three other values.
1298   size_t _overhead_words;
1299   size_t _capacity_words;
1300   size_t _used_words;
1301   uintx _num_chunks_by_type[NumberOfInUseLists];
1302 
1303   // Free lists of blocks are per SpaceManager since they
1304   // are assumed to be in chunks in use by the SpaceManager
1305   // and all chunks in use by a SpaceManager are freed when
1306   // the class loader using the SpaceManager is collected.
1307   BlockFreelist* _block_freelists;
1308 
1309  private:
1310   // Accessors
1311   Metachunk* chunk_list() const { return _chunk_list; }
1312 
1313   BlockFreelist* block_freelists() const { return _block_freelists; }
1314 
1315   Metaspace::MetadataType mdtype() { return _mdtype; }
1316 
1317   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1318   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1319 
1320   Metachunk* current_chunk() const { return _current_chunk; }
1321   void set_current_chunk(Metachunk* v) {
1322     _current_chunk = v;
1323   }
1324 
1325   Metachunk* find_current_chunk(size_t word_size);
1326 
1327   // Add chunk to the list of chunks in use
1328   void add_chunk(Metachunk* v, bool make_current);
1329   void retire_current_chunk();
1330 
1331   Mutex* lock() const { return _lock; }
1332 
1333   // Adds to the given statistic object. Expects to be locked with lock().
1334   void add_to_statistics_locked(SpaceManagerStatistics* out) const;
1335 
1336   // Verify internal counters against the current state. Expects to be locked with lock().
1337   DEBUG_ONLY(void verify_metrics_locked() const;)
1338 
1339  public:
1340   SpaceManager(Metaspace::MetadataType mdtype,
1341                Metaspace::MetaspaceType space_type,
1342                Mutex* lock);
1343   ~SpaceManager();
1344 
1345   enum ChunkMultiples {
1346     MediumChunkMultiple = 4
1347   };
1348 
1349   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1350   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1351   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1352 
1353   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1354 
1355   // Accessors
1356   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1357 
1358   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1359   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1360   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1361 
1362   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1363 
1364   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1365 
1366   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1367 
1368   size_t capacity_words() const     { return _capacity_words; }
1369   size_t used_words() const         { return _used_words; }
1370   size_t overhead_words() const     { return _overhead_words; }
1371 
1372   // Adjust local and global counters after a new chunk has been added.
1373   void account_for_new_chunk(const Metachunk* new_chunk);
1374 
1375   // Adjust local and global counters after space has been allocated from the current chunk.
1376   void account_for_allocation(size_t words);
1377 
1378   // Adjust global counters just before the SpaceManager dies, after all its chunks
1379   // have been returned to the freelist.
1380   void account_for_spacemanager_death();
1381 
1382   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1383   // or return the unadjusted size if the requested size is humongous.
1384   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1385   size_t adjust_initial_chunk_size(size_t requested) const;
1386 
1387   // Get the initial chunk size for this metaspace type.
1388   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1389 
1390   // Todo: remove this once we have counters by chunk type.
1391   uintx num_chunks_by_type(ChunkIndex chunk_type) const       { return _num_chunks_by_type[chunk_type]; }
1392 
1393   Metachunk* get_new_chunk(size_t chunk_word_size);
1394 
1395   // Block allocation and deallocation.
1396   // Allocates a block from the current chunk
1397   MetaWord* allocate(size_t word_size);
1398 
1399   // Helper for allocations
1400   MetaWord* allocate_work(size_t word_size);
1401 
1402   // Returns a block to the per-manager freelist.
1403   void deallocate(MetaWord* p, size_t word_size);
1404 
1405   // Based on the allocation size and a minimum chunk size, returns the chunk size
1406   // to use when expanding the space for a chunk allocation.
1407   size_t calc_chunk_size(size_t allocation_word_size);
1408 
1409   // Called when an allocation from the current chunk fails.
1410   // Gets a new chunk (may require getting a new virtual space),
1411   // and allocates from that chunk.
1412   MetaWord* grow_and_allocate(size_t word_size);
1413 
1414   // Notify MemoryService of the current metaspace memory usage.
1415   void track_metaspace_memory_usage();
1416 
1417   // debugging support.
1418 
1419   void print_on(outputStream* st) const;
1420   void locked_print_chunks_in_use_on(outputStream* st) const;
1421 
1422   void verify();
1423   void verify_chunk_size(Metachunk* chunk);
1424 
1425   // This adjusts the given size to be at least the minimum allocation size in
1426   // words for data in metaspace.  Essentially, the minimum size is currently 3 words.
1427   size_t get_allocation_word_size(size_t word_size) {
1428     size_t byte_size = word_size * BytesPerWord;
1429 
1430     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1431     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1432 
1433     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1434     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1435 
1436     return raw_word_size;
1437   }
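  // Illustrative worked example for the adjustment above (a sketch, assuming a 64-bit VM
  // where BytesPerWord == 8, sizeof(Metablock) == 24 bytes == 3 words, and
  // Metachunk::object_alignment() == 8 bytes):
  //   get_allocation_word_size(1)  -> MAX2(8, 24)  = 24 bytes -> 3 words
  //   get_allocation_word_size(10) -> MAX2(80, 24) = 80 bytes -> 10 words
  // Requests below the minimum block size are padded up to it; larger requests are only
  // rounded up to the object alignment.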
1438 
1439   // Adds to the given statistic object.
1440   void add_to_statistics(SpaceManagerStatistics* out) const;
1441 
1442   // Verify internal counters against the current state.
1443   DEBUG_ONLY(void verify_metrics() const;)
1444 
1445 };
1446 
1447 uint const SpaceManager::_small_chunk_limit = 4;
1448 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
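// Roughly speaking (an illustrative sketch, assuming the usual non-class chunk geometry of
// 128/512/8192 words for specialized/small/medium chunks), these limits cap how many of the
// smaller chunk types a single SpaceManager will accumulate before asking for the next larger
// size: a normal loader is handed at most _small_chunk_limit small chunks (4 * 512 words)
// before being served medium chunks, and anonymous/delegating-metadata loaders get at most
// _anon_and_delegating_metadata_specialize_chunk_limit specialized chunks, which bounds the
// per-loader fragmentation caused by many tiny chunks.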
1449 
1450 void VirtualSpaceNode::inc_container_count() {
1451   assert_lock_strong(MetaspaceExpand_lock);
1452   _container_count++;
1453 }
1454 
1455 void VirtualSpaceNode::dec_container_count() {
1456   assert_lock_strong(MetaspaceExpand_lock);
1457   _container_count--;
1458 }
1459 
1460 #ifdef ASSERT
1461 void VirtualSpaceNode::verify_container_count() {
1462   assert(_container_count == container_count_slow(),
1463          "Inconsistency in container_count _container_count " UINTX_FORMAT
1464          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1465 }
1466 #endif
1467 
1468 // BlockFreelist methods
1469 
1470 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1471 
1472 BlockFreelist::~BlockFreelist() {
1473   delete _dictionary;
1474   if (_small_blocks != NULL) {
1475     delete _small_blocks;
1476   }
1477 }
1478 
1479 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1480   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1481 
1482   Metablock* free_chunk = ::new (p) Metablock(word_size);
1483   if (word_size < SmallBlocks::small_block_max_size()) {
1484     small_blocks()->return_block(free_chunk, word_size);
1485   } else {
1486     dictionary()->return_chunk(free_chunk);
1487   }
1488   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1489             SIZE_FORMAT, p2i(free_chunk), word_size);
1490 }
1491 
1492 MetaWord* BlockFreelist::get_block(size_t word_size) {
1493   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1494 
1495   // Try small_blocks first.
1496   if (word_size < SmallBlocks::small_block_max_size()) {
1497     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1498     // this space manager.
1499     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1500     if (new_block != NULL) {
1501       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1502               p2i(new_block), word_size);
1503       return new_block;
1504     }
1505   }
1506 
1507   if (word_size < BlockFreelist::min_dictionary_size()) {
1508     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1509     return NULL;
1510   }
1511 
1512   Metablock* free_block = dictionary()->get_chunk(word_size);
1513   if (free_block == NULL) {
1514     return NULL;
1515   }
1516 
1517   const size_t block_size = free_block->size();
1518   if (block_size > WasteMultiplier * word_size) {
1519     return_block((MetaWord*)free_block, block_size);
1520     return NULL;
1521   }
1522 
1523   MetaWord* new_block = (MetaWord*)free_block;
1524   assert(block_size >= word_size, "Incorrect size of block from freelist");
1525   const size_t unused = block_size - word_size;
1526   if (unused >= SmallBlocks::small_block_min_size()) {
1527     return_block(new_block + word_size, unused);
1528   }
1529 
1530   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1531             p2i(new_block), word_size);
1532   return new_block;
1533 }
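// Illustration of the splitting policy above (hypothetical numbers, assuming for the sake of
// the arithmetic that WasteMultiplier == 4): a request for 20 words that finds a 100-word
// block in the dictionary puts the block back untouched (100 > 4 * 20) and returns NULL, so
// the caller falls back to chunk allocation; a request for 30 words that finds the same
// 100-word block keeps it, hands out the block and returns the 70-word tail to the freelist,
// provided 70 >= SmallBlocks::small_block_min_size().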
1534 
1535 void BlockFreelist::print_on(outputStream* st) const {
1536   dictionary()->print_free_lists(st);
1537   if (_small_blocks != NULL) {
1538     _small_blocks->print_on(st);
1539   }
1540 }
1541 
1542 // VirtualSpaceNode methods
1543 
1544 VirtualSpaceNode::~VirtualSpaceNode() {
1545   _rs.release();
1546   if (_occupancy_map != NULL) {
1547     delete _occupancy_map;
1548   }
1549 #ifdef ASSERT
1550   size_t word_size = sizeof(*this) / BytesPerWord;
1551   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1552 #endif
1553 }
1554 
1555 size_t VirtualSpaceNode::used_words_in_vs() const {
1556   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1557 }
1558 
1559 // Space committed in the VirtualSpace
1560 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1561   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1562 }
1563 
1564 size_t VirtualSpaceNode::free_words_in_vs() const {
1565   return pointer_delta(end(), top(), sizeof(MetaWord));
1566 }
1567 
1568 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1569 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1570 
1571   assert(target_top > top(), "Sanity");
1572 
1573   // Padding chunks are added to the freelist.
1574   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1575 
1576   // shorthands
1577   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1578   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1579   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1580 
1581   while (top() < target_top) {
1582 
1583     // We could make this code more generic, but right now we only deal with two possible chunk sizes
1584     // for padding chunks, so it is not worth it.
1585     size_t padding_chunk_word_size = small_word_size;
1586     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1587       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1588       padding_chunk_word_size = spec_word_size;
1589     }
1590     MetaWord* here = top();
1591     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1592     inc_top(padding_chunk_word_size);
1593 
1594     // Create new padding chunk.
1595     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1596     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1597 
1598     Metachunk* const padding_chunk =
1599       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1600     assert(padding_chunk == (Metachunk*)here, "Sanity");
1601     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1602     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1603                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1604                                        (is_class() ? "class space " : "metaspace"),
1605                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1606 
1607     // Mark chunk start in occupancy map.
1608     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1609 
1610     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1611     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1612     // will assert that).
1613     do_update_in_use_info_for_chunk(padding_chunk, true);
1614 
1615     // Return Chunk to freelist.
1616     inc_container_count();
1617     chunk_manager->return_single_chunk(padding_chunk);
1618     // Please note: at this point ChunkManager::return_single_chunk() may
1619     // already have merged the padding chunk with neighboring chunks, so
1620     // it may have vanished. Do not reference the padding
1621     // chunk beyond this point.
1622   }
1623 
1624   assert(top() == target_top, "Sanity");
1625 
1626 } // allocate_padding_chunks_until_top_is_at()
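// Worked example for the padding loop above (a sketch, assuming the usual non-class chunk
// sizes of 128-word specialized, 512-word small and 8192-word medium chunks): if top() sits
// 384 words past a medium boundary and target_top is the next medium boundary 7808 words
// away, the loop first emits one 128-word specialized padding chunk (top was spec-aligned
// but not small-aligned), after which top is small-aligned, and then emits fifteen 512-word
// small padding chunks (15 * 512 = 7680 words) to reach target_top. All padding chunks go
// straight to the freelist and may be merged with neighbors immediately.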
1627 
1628 // Allocates the chunk from the virtual space only.
1629 // This interface is also used internally for debugging.  Not all
1630 // chunks removed here are necessarily used for allocation.
1631 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1632   // Non-humongous chunks are to be allocated aligned to their chunk
1633   // size. So, start addresses of medium chunks are aligned to medium
1634   // chunk size, those of small chunks to small chunk size and so
1635   // forth. This facilitates merging of free chunks and reduces
1636   // fragmentation. Chunk sizes are spec < small < medium, with each
1637   // larger chunk size being a multiple of the next smaller chunk
1638   // size.
1639   // Because of this alignment, we may need to create a number of padding
1640   // chunks. These chunks are created and added to the freelist.
1641 
1642   // The chunk manager to which we will give our padding chunks.
1643   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1644 
1645   // shorthands
1646   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1647   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1648   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1649 
1650   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1651          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1652 
1653   // Chunk alignment (in bytes) == chunk size unless humongous.
1654   // Humongous chunks are aligned to the smallest chunk size (spec).
1655   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1656                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1657 
1658   // Do we have enough space to create the requested chunk plus
1659   // any padding chunks needed?
1660   MetaWord* const next_aligned =
1661     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1662   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1663     return NULL;
1664   }
1665 
1666   // Before allocating the requested chunk, allocate padding chunks if necessary.
1667   // We only need to do this for small or medium chunks: specialized chunks are the
1668   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1669   // (implicitly, also aligned to smallest chunk size).
1670   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1671     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1672         (is_class() ? "class space " : "metaspace"),
1673         top(), next_aligned);
1674     allocate_padding_chunks_until_top_is_at(next_aligned);
1675     // Now, top should be aligned correctly.
1676     assert_is_aligned(top(), required_chunk_alignment);
1677   }
1678 
1679   // Now, top should be aligned correctly.
1680   assert_is_aligned(top(), required_chunk_alignment);
1681 
1682   // Bottom of the new chunk
1683   MetaWord* chunk_limit = top();
1684   assert(chunk_limit != NULL, "Not safe to call this method");
1685 
1686   // The virtual spaces are always expanded by the
1687   // commit granularity to enforce the following condition.
1688   // Without this the is_available check will not work correctly.
1689   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1690       "The committed memory doesn't match the expanded memory.");
1691 
1692   if (!is_available(chunk_word_size)) {
1693     LogTarget(Debug, gc, metaspace, freelist) lt;
1694     if (lt.is_enabled()) {
1695       LogStream ls(lt);
1696       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1697       // Dump some information about the virtual space that is nearly full
1698       print_on(&ls);
1699     }
1700     return NULL;
1701   }
1702 
1703   // Take the space (bump top on the current virtual space).
1704   inc_top(chunk_word_size);
1705 
1706   // Initialize the chunk
1707   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1708   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1709   assert(result == (Metachunk*)chunk_limit, "Sanity");
1710   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1711   do_update_in_use_info_for_chunk(result, true);
1712 
1713   inc_container_count();
1714 
1715   if (VerifyMetaspace) {
1716     DEBUG_ONLY(chunk_manager->locked_verify());
1717     DEBUG_ONLY(this->verify());
1718   }
1719 
1720   DEBUG_ONLY(do_verify_chunk(result));
1721 
1722   result->inc_use_count();
1723 
1724   return result;
1725 }
1726 
1727 
1728 // Expand the virtual space (commit more of the reserved space)
1729 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1730   size_t min_bytes = min_words * BytesPerWord;
1731   size_t preferred_bytes = preferred_words * BytesPerWord;
1732 
1733   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1734 
1735   if (uncommitted < min_bytes) {
1736     return false;
1737   }
1738 
1739   size_t commit = MIN2(preferred_bytes, uncommitted);
1740   bool result = virtual_space()->expand_by(commit, false);
1741 
1742   if (result) {
1743     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1744               (is_class() ? "class" : "non-class"), commit);
1745     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
1746   } else {
1747     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1748               (is_class() ? "class" : "non-class"), commit);
1749   }
1750 
1751   assert(result, "Failed to commit memory");
1752 
1753   return result;
1754 }
1755 
1756 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1757   assert_lock_strong(MetaspaceExpand_lock);
1758   Metachunk* result = take_from_committed(chunk_word_size);
1759   return result;
1760 }
1761 
1762 bool VirtualSpaceNode::initialize() {
1763 
1764   if (!_rs.is_reserved()) {
1765     return false;
1766   }
1767 
1768   // These are necessary restrictions to make sure that the virtual space always
1769   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1770   // aligned only the middle alignment of the VirtualSpace is used.
1771   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1772   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1773 
1774   // ReservedSpaces marked as special will have the entire memory
1775   // pre-committed. Setting a committed size will make sure that
1776   // committed_size and actual_committed_size agree.
1777   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1778 
1779   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1780                                             Metaspace::commit_alignment());
1781   if (result) {
1782     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1783         "Checking that the pre-committed memory was registered by the VirtualSpace");
1784 
1785     set_top((MetaWord*)virtual_space()->low());
1786     set_reserved(MemRegion((HeapWord*)_rs.base(),
1787                  (HeapWord*)(_rs.base() + _rs.size())));
1788 
1789     assert(reserved()->start() == (HeapWord*) _rs.base(),
1790            "Reserved start was not set properly " PTR_FORMAT
1791            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1792     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1793            "Reserved size was not set properly " SIZE_FORMAT
1794            " != " SIZE_FORMAT, reserved()->word_size(),
1795            _rs.size() / BytesPerWord);
1796   }
1797 
1798   // Initialize Occupancy Map.
1799   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1800   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1801 
1802   return result;
1803 }
1804 
1805 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
1806   size_t used_words = used_words_in_vs();
1807   size_t commit_words = committed_words();
1808   size_t res_words = reserved_words();
1809   VirtualSpace* vs = virtual_space();
1810 
1811   st->print("node @" PTR_FORMAT ": ", p2i(this));
1812   st->print("reserved=");
1813   print_scaled_words(st, res_words, scale);
1814   st->print(", committed=");
1815   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
1816   st->print(", used=");
1817   print_scaled_words_and_percentage(st, used_words, res_words, scale);
1818   st->cr();
1819   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
1820            PTR_FORMAT ", " PTR_FORMAT ")",
1821            p2i(bottom()), p2i(top()), p2i(end()),
1822            p2i(vs->high_boundary()));
1823 }
1824 
1825 #ifdef ASSERT
1826 void VirtualSpaceNode::mangle() {
1827   size_t word_size = capacity_words_in_vs();
1828   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1829 }
1830 #endif // ASSERT
1831 
1832 // VirtualSpaceList methods
1833 // Space allocated from the VirtualSpace
1834 
1835 VirtualSpaceList::~VirtualSpaceList() {
1836   VirtualSpaceListIterator iter(virtual_space_list());
1837   while (iter.repeat()) {
1838     VirtualSpaceNode* vsl = iter.get_next();
1839     delete vsl;
1840   }
1841 }
1842 
1843 void VirtualSpaceList::inc_reserved_words(size_t v) {
1844   assert_lock_strong(MetaspaceExpand_lock);
1845   _reserved_words = _reserved_words + v;
1846 }
1847 void VirtualSpaceList::dec_reserved_words(size_t v) {
1848   assert_lock_strong(MetaspaceExpand_lock);
1849   _reserved_words = _reserved_words - v;
1850 }
1851 
1852 #define assert_committed_below_limit()                        \
1853   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1854          "Too much committed memory. Committed: " SIZE_FORMAT \
1855          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1856           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1857 
1858 void VirtualSpaceList::inc_committed_words(size_t v) {
1859   assert_lock_strong(MetaspaceExpand_lock);
1860   _committed_words = _committed_words + v;
1861 
1862   assert_committed_below_limit();
1863 }
1864 void VirtualSpaceList::dec_committed_words(size_t v) {
1865   assert_lock_strong(MetaspaceExpand_lock);
1866   _committed_words = _committed_words - v;
1867 
1868   assert_committed_below_limit();
1869 }
1870 
1871 void VirtualSpaceList::inc_virtual_space_count() {
1872   assert_lock_strong(MetaspaceExpand_lock);
1873   _virtual_space_count++;
1874 }
1875 void VirtualSpaceList::dec_virtual_space_count() {
1876   assert_lock_strong(MetaspaceExpand_lock);
1877   _virtual_space_count--;
1878 }
1879 
1880 void ChunkManager::remove_chunk(Metachunk* chunk) {
1881   size_t word_size = chunk->word_size();
1882   ChunkIndex index = list_index(word_size);
1883   if (index != HumongousIndex) {
1884     free_chunks(index)->remove_chunk(chunk);
1885   } else {
1886     humongous_dictionary()->remove_chunk(chunk);
1887   }
1888 
1889   // Chunk has been removed from the chunks free list, update counters.
1890   account_for_removed_chunk(chunk);
1891 }
1892 
1893 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1894   assert_lock_strong(MetaspaceExpand_lock);
1895   assert(chunk != NULL, "invalid chunk pointer");
1896   // Check for valid merge combinations.
1897   assert((chunk->get_chunk_type() == SpecializedIndex &&
1898           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1899          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1900         "Invalid chunk merge combination.");
1901 
1902   const size_t target_chunk_word_size =
1903     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1904 
1905   // [ prospective merge region )
1906   MetaWord* const p_merge_region_start =
1907     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1908   MetaWord* const p_merge_region_end =
1909     p_merge_region_start + target_chunk_word_size;
1910 
1911   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1912   VirtualSpaceNode* const vsn = chunk->container();
1913   OccupancyMap* const ocmap = vsn->occupancy_map();
1914 
1915   // The prospective chunk merge range must be completely contained by the
1916   // committed range of the virtual space node.
1917   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1918     return false;
1919   }
1920 
1921   // Only attempt to merge this range if at its start a chunk starts and at its end
1922   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1923   // of that range, we cannot merge.
1924   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1925     return false;
1926   }
1927   if (p_merge_region_end < vsn->top() &&
1928       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1929     return false;
1930   }
1931 
1932   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1933   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1934     return false;
1935   }
1936 
1937   // Success! Remove all chunks in this region...
1938   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1939     (is_class() ? "class space" : "metaspace"),
1940     p_merge_region_start, p_merge_region_end);
1941 
1942   const int num_chunks_removed =
1943     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1944 
1945   // ... and create a single new bigger chunk.
1946   Metachunk* const p_new_chunk =
1947       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1948   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1949   p_new_chunk->set_origin(origin_merge);
1950 
1951   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1952     (is_class() ? "class space" : "metaspace"),
1953     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1954 
1955   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1956   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1957   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1958 
1959   // Mark chunk as free. Note: it is not necessary to update the occupancy
1960   // map in-use map, because the old chunks were also free, so nothing
1961   // should have changed.
1962   p_new_chunk->set_is_tagged_free(true);
1963 
1964   // Add new chunk to its freelist.
1965   ChunkList* const list = free_chunks(target_chunk_type);
1966   list->return_chunk_at_head(p_new_chunk);
1967 
1968   // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1969   // should not have changed, because the size of the space should be the same)
1970   _free_chunks_count -= num_chunks_removed;
1971   _free_chunks_count ++;
1972 
1973   // VirtualSpaceNode::container_count does not have to be modified:
1974   // it means "number of active (non-free) chunks", so merging free chunks
1975   // should not affect that count.
1976 
1977   // At the end of a chunk merge, run verification tests.
1978   if (VerifyMetaspace) {
1979     DEBUG_ONLY(this->locked_verify());
1980     DEBUG_ONLY(vsn->verify());
1981   }
1982 
1983   return true;
1984 }
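// Sketch of a successful merge using the function above (hypothetical layout, assuming
// 512-word small and 8192-word medium non-class chunks): a freed small chunk lies inside an
// 8192-word, medium-aligned, fully committed region that contains only free chunks and no
// chunk straddling its borders. Say the region holds 16 free small chunks: all 16 are
// unlinked from the small-chunk freelist, one medium chunk is constructed in place over the
// region and added to the medium freelist, so _free_chunks_count drops by 16 - 1 = 15 while
// _free_chunks_total is unchanged.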
1985 
1986 // Remove all chunks in the given area - the chunks are supposed to be free -
1987 // from their corresponding freelists. Mark them as invalid.
1988 // - This does not correct the occupancy map.
1989 // - This does not adjust the counters in ChunkManager.
1990 // - This does not adjust the container count of the containing VirtualSpaceNode.
1991 // Returns number of chunks removed.
1992 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1993   assert(p != NULL && word_size > 0, "Invalid range.");
1994   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1995   assert_is_aligned(word_size, smallest_chunk_size);
1996 
1997   Metachunk* const start = (Metachunk*) p;
1998   const Metachunk* const end = (Metachunk*)(p + word_size);
1999   Metachunk* cur = start;
2000   int num_removed = 0;
2001   while (cur < end) {
2002     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
2003     DEBUG_ONLY(do_verify_chunk(cur));
2004     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
2005     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
2006     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
2007       (is_class() ? "class space" : "metaspace"),
2008       cur, cur->word_size() * sizeof(MetaWord));
2009     cur->remove_sentinel();
2010     // Note: cannot call ChunkManager::remove_chunk, because that
2011     // modifies the counters in ChunkManager, which we do not want. So
2012     // we call remove_chunk on the freelist directly (see also the
2013     // splitting function which does the same).
2014     ChunkList* const list = free_chunks(list_index(cur->word_size()));
2015     list->remove_chunk(cur);
2016     num_removed ++;
2017     cur = next;
2018   }
2019   return num_removed;
2020 }
2021 
2022 // Walk the list of VirtualSpaceNodes and delete
2023 // nodes with a 0 container_count.  Remove Metachunks in
2024 // the node from their respective freelists.
2025 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2026   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2027   assert_lock_strong(MetaspaceExpand_lock);
2028   // Don't use a VirtualSpaceListIterator because this
2029   // list is being changed and a straightforward use of an iterator is not safe.
2030   VirtualSpaceNode* purged_vsl = NULL;
2031   VirtualSpaceNode* prev_vsl = virtual_space_list();
2032   VirtualSpaceNode* next_vsl = prev_vsl;
2033   while (next_vsl != NULL) {
2034     VirtualSpaceNode* vsl = next_vsl;
2035     DEBUG_ONLY(vsl->verify_container_count();)
2036     next_vsl = vsl->next();
2037     // Don't free the current virtual space since it will likely
2038     // be needed soon.
2039     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2040       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2041                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2042       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
2043       // Unlink it from the list
2044       if (prev_vsl == vsl) {
2045         // This is the case of the current node being the first node.
2046         assert(vsl == virtual_space_list(), "Expected to be the first node");
2047         set_virtual_space_list(vsl->next());
2048       } else {
2049         prev_vsl->set_next(vsl->next());
2050       }
2051 
2052       vsl->purge(chunk_manager);
2053       dec_reserved_words(vsl->reserved_words());
2054       dec_committed_words(vsl->committed_words());
2055       dec_virtual_space_count();
2056       purged_vsl = vsl;
2057       delete vsl;
2058     } else {
2059       prev_vsl = vsl;
2060     }
2061   }
2062 #ifdef ASSERT
2063   if (purged_vsl != NULL) {
2064     // List should be stable enough to use an iterator here.
2065     VirtualSpaceListIterator iter(virtual_space_list());
2066     while (iter.repeat()) {
2067       VirtualSpaceNode* vsl = iter.get_next();
2068       assert(vsl != purged_vsl, "Purge of vsl failed");
2069     }
2070   }
2071 #endif
2072 }
2073 
2074 
2075 // This function looks at the mmap regions in the metaspace without locking.
2076 // The chunks are added with store ordering and not deleted except at
2077 // unloading time during a safepoint.
2078 bool VirtualSpaceList::contains(const void* ptr) {
2079   // List should be stable enough to use an iterator here because removing virtual
2080   // space nodes is only allowed at a safepoint.
2081   VirtualSpaceListIterator iter(virtual_space_list());
2082   while (iter.repeat()) {
2083     VirtualSpaceNode* vsn = iter.get_next();
2084     if (vsn->contains(ptr)) {
2085       return true;
2086     }
2087   }
2088   return false;
2089 }
2090 
2091 void VirtualSpaceList::retire_current_virtual_space() {
2092   assert_lock_strong(MetaspaceExpand_lock);
2093 
2094   VirtualSpaceNode* vsn = current_virtual_space();
2095 
2096   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2097                                   Metaspace::chunk_manager_metadata();
2098 
2099   vsn->retire(cm);
2100 }
2101 
2102 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2103   DEBUG_ONLY(verify_container_count();)
2104   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2105   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2106     ChunkIndex index = (ChunkIndex)i;
2107     size_t chunk_size = chunk_manager->size_by_index(index);
2108 
2109     while (free_words_in_vs() >= chunk_size) {
2110       Metachunk* chunk = get_chunk_vs(chunk_size);
2111       // Chunk will be allocated aligned, so allocation may require
2112       // additional padding chunks. That may cause the above allocation to
2113       // fail. Just ignore the failed allocation and continue with the
2114       // next smaller chunk size. As the VirtualSpaceNode committed
2115       // size should be a multiple of the smallest chunk size, we
2116       // should always be able to fill the VirtualSpace completely.
2117       if (chunk == NULL) {
2118         break;
2119       }
2120       chunk_manager->return_single_chunk(chunk);
2121     }
2122     DEBUG_ONLY(verify_container_count();)
2123   }
2124   assert(free_words_in_vs() == 0, "should be empty now");
2125 }
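// Worked example for retire() above (a sketch, assuming non-class chunk sizes of
// 8192/512/128 words for medium/small/specialized chunks and a top() that is already
// small-chunk aligned): a node with 1664 free words left yields no medium chunk
// (1664 < 8192), three small chunks (3 * 512 = 1536 words) and one specialized chunk
// (128 words), after which free_words_in_vs() == 0 and the final assert holds. If padding
// would be needed and the allocation fails, the loop simply drops to the next smaller size.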
2126 
2127 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2128                                    _is_class(false),
2129                                    _virtual_space_list(NULL),
2130                                    _current_virtual_space(NULL),
2131                                    _reserved_words(0),
2132                                    _committed_words(0),
2133                                    _virtual_space_count(0) {
2134   MutexLockerEx cl(MetaspaceExpand_lock,
2135                    Mutex::_no_safepoint_check_flag);
2136   create_new_virtual_space(word_size);
2137 }
2138 
2139 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2140                                    _is_class(true),
2141                                    _virtual_space_list(NULL),
2142                                    _current_virtual_space(NULL),
2143                                    _reserved_words(0),
2144                                    _committed_words(0),
2145                                    _virtual_space_count(0) {
2146   MutexLockerEx cl(MetaspaceExpand_lock,
2147                    Mutex::_no_safepoint_check_flag);
2148   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2149   bool succeeded = class_entry->initialize();
2150   if (succeeded) {
2151     link_vs(class_entry);
2152   }
2153 }
2154 
2155 size_t VirtualSpaceList::free_bytes() {
2156   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2157 }
2158 
2159 // Allocate another meta virtual space and add it to the list.
2160 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2161   assert_lock_strong(MetaspaceExpand_lock);
2162 
2163   if (is_class()) {
2164     assert(false, "We currently don't support more than one VirtualSpace for"
2165                   " the compressed class space. The initialization of the"
2166                   " CCS uses another code path and should not hit this path.");
2167     return false;
2168   }
2169 
2170   if (vs_word_size == 0) {
2171     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2172     return false;
2173   }
2174 
2175   // Reserve the space
2176   size_t vs_byte_size = vs_word_size * BytesPerWord;
2177   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2178 
2179   // Allocate the meta virtual space and initialize it.
2180   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2181   if (!new_entry->initialize()) {
2182     delete new_entry;
2183     return false;
2184   } else {
2185     assert(new_entry->reserved_words() == vs_word_size,
2186         "Reserved memory size differs from requested memory size");
2187     // ensure lock-free iteration sees fully initialized node
2188     OrderAccess::storestore();
2189     link_vs(new_entry);
2190     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
2191     return true;
2192   }
2193 }
2194 
2195 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2196   if (virtual_space_list() == NULL) {
2197       set_virtual_space_list(new_entry);
2198   } else {
2199     current_virtual_space()->set_next(new_entry);
2200   }
2201   set_current_virtual_space(new_entry);
2202   inc_reserved_words(new_entry->reserved_words());
2203   inc_committed_words(new_entry->committed_words());
2204   inc_virtual_space_count();
2205 #ifdef ASSERT
2206   new_entry->mangle();
2207 #endif
2208   LogTarget(Trace, gc, metaspace) lt;
2209   if (lt.is_enabled()) {
2210     LogStream ls(lt);
2211     VirtualSpaceNode* vsl = current_virtual_space();
2212     ResourceMark rm;
2213     vsl->print_on(&ls);
2214   }
2215 }
2216 
2217 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2218                                       size_t min_words,
2219                                       size_t preferred_words) {
2220   size_t before = node->committed_words();
2221 
2222   bool result = node->expand_by(min_words, preferred_words);
2223 
2224   size_t after = node->committed_words();
2225 
2226   // after and before can be the same if the memory was pre-committed.
2227   assert(after >= before, "Inconsistency");
2228   inc_committed_words(after - before);
2229 
2230   return result;
2231 }
2232 
2233 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2234   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2235   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2236   assert(min_words <= preferred_words, "Invalid arguments");
2237 
2238   const char* const class_or_not = (is_class() ? "class" : "non-class");
2239 
2240   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2241     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2242               class_or_not);
2243     return  false;
2244   }
2245 
2246   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2247   if (allowed_expansion_words < min_words) {
2248     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2249               class_or_not);
2250     return false;
2251   }
2252 
2253   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2254 
2255   // Commit more memory from the current virtual space.
2256   bool vs_expanded = expand_node_by(current_virtual_space(),
2257                                     min_words,
2258                                     max_expansion_words);
2259   if (vs_expanded) {
2260      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2261                class_or_not);
2262      return true;
2263   }
2264   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2265             class_or_not);
2266   retire_current_virtual_space();
2267 
2268   // Get another virtual space.
2269   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2270   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2271 
2272   if (create_new_virtual_space(grow_vs_words)) {
2273     if (current_virtual_space()->is_pre_committed()) {
2274       // The memory was pre-committed, so we are done here.
2275       assert(min_words <= current_virtual_space()->committed_words(),
2276           "The new VirtualSpace was pre-committed, so it"
2277           "should be large enough to fit the alloc request.");
2278       return true;
2279     }
2280 
2281     return expand_node_by(current_virtual_space(),
2282                           min_words,
2283                           max_expansion_words);
2284   }
2285 
2286   return false;
2287 }
2288 
2289 // Given a chunk size, calculate the largest possible padding space which
2290 // could be required when allocating it.
2291 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2292   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2293   if (chunk_type != HumongousIndex) {
2294     // Normal, non-humongous chunks are allocated at chunk size
2295     // boundaries, so the largest padding space required would be that
2296     // minus the smallest chunk size.
2297     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2298     return chunk_word_size - smallest_chunk_size;
2299   } else {
2300     // Humongous chunks are allocated at smallest-chunksize
2301     // boundaries, so there is no padding required.
2302     return 0;
2303   }
2304 }
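// Worked example for the helper above (a sketch, assuming the usual non-class sizes of
// 128-word specialized and 8192-word medium chunks): allocating a medium chunk may require
// up to 8192 - 128 = 8064 words of padding in the worst case, namely when top() is just one
// specialized chunk past a medium-chunk boundary. A humongous chunk never needs padding,
// because it is only aligned to the smallest chunk size anyway.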
2305 
2306 
2307 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2308 
2309   // Allocate a chunk out of the current virtual space.
2310   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2311 
2312   if (next != NULL) {
2313     return next;
2314   }
2315 
2316   // The expand amount is currently only determined by the requested sizes
2317   // and not how much committed memory is left in the current virtual space.
2318 
2319   // We must have enough space for the requested size and any
2320   // additional required padding chunks.
2321   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2322 
2323   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2324   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2325   if (min_word_size >= preferred_word_size) {
2326     // Can happen when humongous chunks are allocated.
2327     preferred_word_size = min_word_size;
2328   }
2329 
2330   bool expanded = expand_by(min_word_size, preferred_word_size);
2331   if (expanded) {
2332     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2333     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2334   }
2335 
2336   return next;
2337 }
2338 
2339 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
2340   st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
2341       _virtual_space_count, p2i(_current_virtual_space));
2342   VirtualSpaceListIterator iter(virtual_space_list());
2343   while (iter.repeat()) {
2344     st->cr();
2345     VirtualSpaceNode* node = iter.get_next();
2346     node->print_on(st, scale);
2347   }
2348 }
2349 
2350 void VirtualSpaceList::print_map(outputStream* st) const {
2351   VirtualSpaceNode* list = virtual_space_list();
2352   VirtualSpaceListIterator iter(list);
2353   unsigned i = 0;
2354   while (iter.repeat()) {
2355     st->print_cr("Node %u:", i);
2356     VirtualSpaceNode* node = iter.get_next();
2357     node->print_map(st, this->is_class());
2358     i ++;
2359   }
2360 }
2361 
2362 // MetaspaceGC methods
2363 
2364 // VM_CollectForMetadataAllocation is the VM operation used to trigger the GC.
2365 // Within the VM operation, after the GC, the attempt to allocate the metadata
2366 // should succeed.  If the GC did not free enough space for the metaspace
2367 // allocation, the HWM is increased so that another virtual space will be
2368 // allocated for the metadata.  With the permanent generation, the increase
2369 // was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2370 // metaspace policy uses those as the small and large steps for the HWM.
2371 //
2372 // After the GC the compute_new_size() for MetaspaceGC is called to
2373 // resize the capacity of the metaspaces.  The current implementation
2374 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2375 // to resize the Java heap by some GCs.  New flags can be implemented
2376 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2377 // free space is desirable in the metaspace capacity to decide how much
2378 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2379 // free space is desirable in the metaspace capacity before decreasing
2380 // the HWM.
2381 
2382 // Calculate the amount to increase the high water mark (HWM).
2383 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2384 // another expansion is not requested too soon.  If that is not
2385 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2386 // If that is still not enough, expand by the size of the allocation
2387 // plus the minimum expansion (MinMetaspaceExpansion).
2388 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2389   size_t min_delta = MinMetaspaceExpansion;
2390   size_t max_delta = MaxMetaspaceExpansion;
2391   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2392 
2393   if (delta <= min_delta) {
2394     delta = min_delta;
2395   } else if (delta <= max_delta) {
2396     // Don't want to hit the high water mark on the next
2397     // allocation so make the delta greater than just enough
2398     // for this allocation.
2399     delta = max_delta;
2400   } else {
2401     // This allocation is large but the next ones are probably not
2402     // so increase by the minimum.
2403     delta = delta + min_delta;
2404   }
2405 
2406   assert_is_aligned(delta, Metaspace::commit_alignment());
2407 
2408   return delta;
2409 }
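// Worked example for delta_capacity_until_GC() (hypothetical request sizes; for the sake of
// the arithmetic assume MinMetaspaceExpansion == 256K, MaxMetaspaceExpansion == 4M and
// commit-alignment rounding is ignored): a 100K request grows the HWM by 256K, a 1M request
// by 4M, and a 16M request by 16M + 256K. Small allocations thus never trigger back-to-back
// expansions, while very large ones still get a little extra headroom.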
2410 
2411 size_t MetaspaceGC::capacity_until_GC() {
2412   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2413   assert(value >= MetaspaceSize, "Not initialized properly?");
2414   return value;
2415 }
2416 
2417 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2418   assert_is_aligned(v, Metaspace::commit_alignment());
2419 
2420   intptr_t capacity_until_GC = _capacity_until_GC;
2421   intptr_t new_value = capacity_until_GC + v;
2422 
2423   if (new_value < capacity_until_GC) {
2424     // The addition wrapped around, set new_value to aligned max value.
2425     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2426   }
2427 
2428   intptr_t expected = _capacity_until_GC;
2429   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2430 
2431   if (expected != actual) {
2432     return false;
2433   }
2434 
2435   if (new_cap_until_GC != NULL) {
2436     *new_cap_until_GC = new_value;
2437   }
2438   if (old_cap_until_GC != NULL) {
2439     *old_cap_until_GC = capacity_until_GC;
2440   }
2441   return true;
2442 }
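// Usage note for inc_capacity_until_GC() above: the CAS can lose a race with a concurrent
// update, in which case nothing is changed and false is returned. A caller that must raise
// the HWM can therefore retry until it wins, roughly along these lines (a hypothetical
// sketch, not a quote of an actual call site):
//
//   size_t new_cap, old_cap;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap)) {
//     // Someone else bumped the HWM in the meantime; re-evaluate and try again.
//   }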
2443 
2444 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2445   assert_is_aligned(v, Metaspace::commit_alignment());
2446 
2447   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2448 }
2449 
2450 void MetaspaceGC::initialize() {
2451   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2452   // we can't do a GC during initialization.
2453   _capacity_until_GC = MaxMetaspaceSize;
2454 }
2455 
2456 void MetaspaceGC::post_initialize() {
2457   // Reset the high-water mark once the VM initialization is done.
2458   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2459 }
2460 
2461 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2462   // Check if the compressed class space is full.
2463   if (is_class && Metaspace::using_class_space()) {
2464     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2465     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2466       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2467                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2468       return false;
2469     }
2470   }
2471 
2472   // Check if the user has imposed a limit on the metaspace memory.
2473   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2474   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2475     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2476               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2477     return false;
2478   }
2479 
2480   return true;
2481 }
2482 
2483 size_t MetaspaceGC::allowed_expansion() {
2484   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2485   size_t capacity_until_gc = capacity_until_GC();
2486 
2487   assert(capacity_until_gc >= committed_bytes,
2488          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2489          capacity_until_gc, committed_bytes);
2490 
2491   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2492   size_t left_until_GC = capacity_until_gc - committed_bytes;
2493   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2494   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2495             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2496             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2497 
2498   return left_to_commit / BytesPerWord;
2499 }
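// Worked example for allowed_expansion() (hypothetical numbers): with 60M committed,
// capacity_until_GC() at 80M and MaxMetaspaceSize at 100M, left_until_GC is 20M and
// left_until_max is 40M, so the function reports MIN2(20M, 40M) / BytesPerWord words;
// here the GC threshold, not the hard limit, caps the next expansion.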
2500 
2501 void MetaspaceGC::compute_new_size() {
2502   assert(_shrink_factor <= 100, "invalid shrink factor");
2503   uint current_shrink_factor = _shrink_factor;
2504   _shrink_factor = 0;
2505 
2506   // Using committed_bytes() for used_after_gc is an overestimation, since the
2507   // chunk free lists are included in committed_bytes() and the memory in an
2508   // un-fragmented chunk free list is available for future allocations.
2509   // However, if the chunk free lists become fragmented, then the memory may
2510   // not be available for future allocations and the memory is therefore "in use".
2511   // Including the chunk free lists in the definition of "in use" is therefore
2512   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2513   // shrink below committed_bytes() and this has caused serious bugs in the past.
2514   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2515   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2516 
2517   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2518   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2519 
2520   const double min_tmp = used_after_gc / maximum_used_percentage;
2521   size_t minimum_desired_capacity =
2522     (size_t)MIN2(min_tmp, double(max_uintx));
2523   // Don't shrink below the initial metaspace size (MetaspaceSize).
2524   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2525                                   MetaspaceSize);
2526 
2527   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2528   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2529                            minimum_free_percentage, maximum_used_percentage);
2530   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2531 
2532 
2533   size_t shrink_bytes = 0;
2534   if (capacity_until_GC < minimum_desired_capacity) {
2535     // If we have less capacity below the metaspace HWM, then
2536     // increment the HWM.
2537     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2538     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2539     // Don't expand unless it's significant
2540     if (expand_bytes >= MinMetaspaceExpansion) {
2541       size_t new_capacity_until_GC = 0;
2542       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2543       assert(succeeded, "Should always succesfully increment HWM when at safepoint");
2544 
2545       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2546                                                new_capacity_until_GC,
2547                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2548       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2549                                minimum_desired_capacity / (double) K,
2550                                expand_bytes / (double) K,
2551                                MinMetaspaceExpansion / (double) K,
2552                                new_capacity_until_GC / (double) K);
2553     }
2554     return;
2555   }
2556 
2557   // No expansion, now see if we want to shrink
2558   // We would never want to shrink more than this
2559   assert(capacity_until_GC >= minimum_desired_capacity,
2560          SIZE_FORMAT " >= " SIZE_FORMAT,
2561          capacity_until_GC, minimum_desired_capacity);
2562   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2563 
2564   // Should shrinking be considered?
2565   if (MaxMetaspaceFreeRatio < 100) {
2566     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2567     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2568     const double max_tmp = used_after_gc / minimum_used_percentage;
2569     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2570     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2571                                     MetaspaceSize);
2572     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2573                              maximum_free_percentage, minimum_used_percentage);
2574     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2575                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2576 
2577     assert(minimum_desired_capacity <= maximum_desired_capacity,
2578            "sanity check");
2579 
2580     if (capacity_until_GC > maximum_desired_capacity) {
2581       // Capacity too large, compute shrinking size
2582       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2583       // We don't want to shrink all the way back to the initial size if people call
2584       // System.gc(), because some programs do that between "phases" and then
2585       // we'd just have to grow the metaspace up again for the next phase.  So we
2586       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2587       // on the third call, and 100% by the fourth call.  But if we recompute
2588       // size without shrinking, it goes back to 0%.
2589       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2590 
2591       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2592 
2593       assert(shrink_bytes <= max_shrink_bytes,
2594              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2595              shrink_bytes, max_shrink_bytes);
2596       if (current_shrink_factor == 0) {
2597         _shrink_factor = 10;
2598       } else {
2599         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2600       }
2601       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2602                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2603       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2604                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2605     }
2606   }
2607 
2608   // Don't shrink unless it's significant
2609   if (shrink_bytes >= MinMetaspaceExpansion &&
2610       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2611     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2612     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2613                                              new_capacity_until_GC,
2614                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2615   }
2616 }
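
// Illustrative sketch, not part of metaspace.cpp or this patch: a standalone
// program showing how the shrink damping in compute_new_size() plays out over
// successive calls that decide to shrink. The 4096 KB excess is an assumed
// value chosen only for this example.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  unsigned shrink_factor = 0;        // mirrors MetaspaceGC::_shrink_factor starting at 0%
  const size_t excess_kb = 4096;     // assumed capacity above maximum_desired_capacity, in KB
  for (int call = 1; call <= 4; call++) {
    size_t shrink_kb = excess_kb / 100 * shrink_factor;   // same damping formula as above
    printf("call %d: shrink_factor=%u%% -> shrink %zuKB of %zuKB excess\n",
           call, shrink_factor, shrink_kb, excess_kb);
    // 0% -> 10% -> 40% -> 100%, capped at 100%, exactly as the comment above describes
    shrink_factor = (shrink_factor == 0) ? 10u : std::min(shrink_factor * 4u, 100u);
  }
  return 0;
}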
2617 
2618 // Metadebug methods
2619 
2620 void Metadebug::init_allocation_fail_alot_count() {
2621   if (MetadataAllocationFailALot) {
2622     _allocation_fail_alot_count =
2623       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2624   }
2625 }
2626 
2627 #ifdef ASSERT
2628 bool Metadebug::test_metadata_failure() {
2629   if (MetadataAllocationFailALot &&
2630       Threads::is_vm_complete()) {
2631     if (_allocation_fail_alot_count > 0) {
2632       _allocation_fail_alot_count--;
2633     } else {
2634       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2635       init_allocation_fail_alot_count();
2636       return true;
2637     }
2638   }
2639   return false;
2640 }
2641 #endif
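
// Illustrative sketch, not part of metaspace.cpp: how the fail-alot counter is
// drawn -- a uniform value in [1, interval] that becomes the number of
// allocations until the next injected failure. rand() stands in for
// os::random() and 1000 for MetadataAllocationFailALotInterval.
#include <cstdio>
#include <cstdlib>

int main() {
  const long interval = 1000;
  srand(42);
  for (int i = 0; i < 3; i++) {
    long count = 1 + (long)((double)interval * rand() / (RAND_MAX + 1.0));
    printf("next injected metadata allocation failure after %ld allocations\n", count);
  }
  return 0;
}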
2642 
2643 // ChunkManager methods
2644 size_t ChunkManager::free_chunks_total_words() {
2645   return _free_chunks_total;
2646 }
2647 
2648 size_t ChunkManager::free_chunks_total_bytes() {
2649   return free_chunks_total_words() * BytesPerWord;
2650 }
2651 
2652 // Update internal accounting after a chunk was added
2653 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2654   assert_lock_strong(MetaspaceExpand_lock);
2655   _free_chunks_count ++;
2656   _free_chunks_total += c->word_size();
2657 }
2658 
2659 // Update internal accounting after a chunk was removed
2660 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2661   assert_lock_strong(MetaspaceExpand_lock);
2662   assert(_free_chunks_count >= 1,
2663     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2664   assert(_free_chunks_total >= c->word_size(),
2665     "ChunkManager::_free_chunks_total: about to go negative "
2666      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2667   _free_chunks_count --;
2668   _free_chunks_total -= c->word_size();
2669 }
2670 
2671 size_t ChunkManager::free_chunks_count() {
2672 #ifdef ASSERT
2673   if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
2674     MutexLockerEx cl(MetaspaceExpand_lock,
2675                      Mutex::_no_safepoint_check_flag);
2676     // This lock is only needed in debug builds because the verification
2677     // of _free_chunks_total walks the list of free chunks.
2678     slow_locked_verify_free_chunks_count();
2679   }
2680 #endif
2681   return _free_chunks_count;
2682 }
2683 
2684 ChunkIndex ChunkManager::list_index(size_t size) {
2685   return get_chunk_type_by_size(size, is_class());
2686 }
2687 
2688 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2689   index_bounds_check(index);
2690   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2691   return get_size_for_nonhumongous_chunktype(index, is_class());
2692 }
2693 
2694 void ChunkManager::locked_verify_free_chunks_total() {
2695   assert_lock_strong(MetaspaceExpand_lock);
2696   assert(sum_free_chunks() == _free_chunks_total,
2697          "_free_chunks_total " SIZE_FORMAT " is not the"
2698          " same as sum " SIZE_FORMAT, _free_chunks_total,
2699          sum_free_chunks());
2700 }
2701 
2702 void ChunkManager::locked_verify_free_chunks_count() {
2703   assert_lock_strong(MetaspaceExpand_lock);
2704   assert(sum_free_chunks_count() == _free_chunks_count,
2705          "_free_chunks_count " SIZE_FORMAT " is not the"
2706          " same as sum " SIZE_FORMAT, _free_chunks_count,
2707          sum_free_chunks_count());
2708 }
2709 
2710 void ChunkManager::verify() {
2711   MutexLockerEx cl(MetaspaceExpand_lock,
2712                      Mutex::_no_safepoint_check_flag);
2713   locked_verify();
2714 }
2715 
2716 void ChunkManager::locked_verify() {
2717   locked_verify_free_chunks_count();
2718   locked_verify_free_chunks_total();
2719   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2720     ChunkList* list = free_chunks(i);
2721     if (list != NULL) {
2722       Metachunk* chunk = list->head();
2723       while (chunk) {
2724         DEBUG_ONLY(do_verify_chunk(chunk);)
2725         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2726         chunk = chunk->next();
2727       }
2728     }
2729   }
2730 }
2731 
2732 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2733   assert_lock_strong(MetaspaceExpand_lock);
2734   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2735                 _free_chunks_total, _free_chunks_count);
2736 }
2737 
2738 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2739   assert_lock_strong(MetaspaceExpand_lock);
2740   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2741                 sum_free_chunks(), sum_free_chunks_count());
2742 }
2743 
2744 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2745   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2746          "Bad index: %d", (int)index);
2747 
2748   return &_free_chunks[index];
2749 }
2750 
2751 // These methods, which sum the free chunk lists, are used by printing
2752 // methods that run in product builds.
2753 size_t ChunkManager::sum_free_chunks() {
2754   assert_lock_strong(MetaspaceExpand_lock);
2755   size_t result = 0;
2756   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2757     ChunkList* list = free_chunks(i);
2758 
2759     if (list == NULL) {
2760       continue;
2761     }
2762 
2763     result = result + list->count() * list->size();
2764   }
2765   result = result + humongous_dictionary()->total_size();
2766   return result;
2767 }
2768 
2769 size_t ChunkManager::sum_free_chunks_count() {
2770   assert_lock_strong(MetaspaceExpand_lock);
2771   size_t count = 0;
2772   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2773     ChunkList* list = free_chunks(i);
2774     if (list == NULL) {
2775       continue;
2776     }
2777     count = count + list->count();
2778   }
2779   count = count + humongous_dictionary()->total_free_blocks();
2780   return count;
2781 }
2782 
2783 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2784   ChunkIndex index = list_index(word_size);
2785   assert(index < HumongousIndex, "No humongous list");
2786   return free_chunks(index);
2787 }
2788 
2789 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2790 // split up the larger chunk into n smaller chunks, at least one of which should be
2791 // the target chunk of target chunk size. The smaller chunks, including the target
2792 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
2793 // Note that this chunk is supposed to be removed from the freelist right away.
2794 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2795   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2796 
2797   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2798   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2799 
2800   MetaWord* const region_start = (MetaWord*)larger_chunk;
2801   const size_t region_word_len = larger_chunk->word_size();
2802   MetaWord* const region_end = region_start + region_word_len;
2803   VirtualSpaceNode* const vsn = larger_chunk->container();
2804   OccupancyMap* const ocmap = vsn->occupancy_map();
2805 
2806   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2807   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2808   // at an address suitable to place the smaller target chunk.
2809   assert_is_aligned(region_start, target_chunk_word_size);
2810 
2811   // Remove old chunk.
2812   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2813   larger_chunk->remove_sentinel();
2814 
2815   // Prevent access to the old chunk from here on.
2816   larger_chunk = NULL;
2817   // ... and wipe it.
2818   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2819 
2820   // In its place create first the target chunk...
2821   MetaWord* p = region_start;
2822   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2823   assert(target_chunk == (Metachunk*)p, "Sanity");
2824   target_chunk->set_origin(origin_split);
2825 
2826   // Note: we do not need to mark its start in the occupancy map
2827   // because it coincides with the old chunk start.
2828 
2829   // Mark chunk as free and return to the freelist.
2830   do_update_in_use_info_for_chunk(target_chunk, false);
2831   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2832 
2833   // This chunk should now be valid and can be verified.
2834   DEBUG_ONLY(do_verify_chunk(target_chunk));
2835 
2836   // In the remaining space create the remainder chunks.
2837   p += target_chunk->word_size();
2838   assert(p < region_end, "Sanity");
2839 
2840   while (p < region_end) {
2841 
2842     // Find the largest chunk size which fits the alignment requirements at address p.
2843     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2844     size_t this_chunk_word_size = 0;
2845     for(;;) {
2846       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2847       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2848         break;
2849       } else {
2850         this_chunk_index = prev_chunk_index(this_chunk_index);
2851         assert(this_chunk_index >= target_chunk_index, "Sanity");
2852       }
2853     }
2854 
2855     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2856     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2857     assert(p + this_chunk_word_size <= region_end, "Sanity");
2858 
2859     // Create splitting chunk.
2860     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2861     assert(this_chunk == (Metachunk*)p, "Sanity");
2862     this_chunk->set_origin(origin_split);
2863     ocmap->set_chunk_starts_at_address(p, true);
2864     do_update_in_use_info_for_chunk(this_chunk, false);
2865 
2866     // This chunk should be valid and can be verified.
2867     DEBUG_ONLY(do_verify_chunk(this_chunk));
2868 
2869     // Return this chunk to freelist and correct counter.
2870     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2871     _free_chunks_count ++;
2872 
2873     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2874       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2875       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2876       p2i(region_start), p2i(region_end));
2877 
2878     p += this_chunk_word_size;
2879 
2880   }
2881 
2882   return target_chunk;
2883 }
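
// Illustrative sketch, not part of metaspace.cpp: a standalone model of the
// remainder layout split_chunk() produces when a medium chunk is split to
// satisfy a specialized request. The word sizes (128/512/8192) are assumed
// example values for the non-class chunk types, not authoritative constants.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t sizes[] = { 128, 512, 8192 };  // specialized, small, medium (assumed)
  const size_t region = 8192;                 // word size of the larger (medium) chunk being split
  const size_t target = 128;                  // requested target chunk word size
  size_t p = target;                          // the target chunk occupies offset 0
  printf("offset %5zu: %zu words (target chunk)\n", (size_t)0, target);
  while (p < region) {
    // like the loop above: pick the largest chunk size that is aligned at p and still fits
    int i = 2;
    while (p % sizes[i] != 0 || p + sizes[i] > region) {
      i--;
    }
    printf("offset %5zu: %zu words (remainder chunk)\n", p, sizes[i]);
    p += sizes[i];
  }
  return 0;
}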
2884 
2885 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2886   assert_lock_strong(MetaspaceExpand_lock);
2887 
2888   slow_locked_verify();
2889 
2890   Metachunk* chunk = NULL;
2891   bool we_did_split_a_chunk = false;
2892 
2893   if (list_index(word_size) != HumongousIndex) {
2894 
2895     ChunkList* free_list = find_free_chunks_list(word_size);
2896     assert(free_list != NULL, "Sanity check");
2897 
2898     chunk = free_list->head();
2899 
2900     if (chunk == NULL) {
2901       // There are no free chunks of the requested size; split a larger free chunk into smaller chunks.
2902       // This is the counterpart of the coalescing-upon-chunk-return.
2903 
2904       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2905 
2906       // Is there a larger chunk we could split?
2907       Metachunk* larger_chunk = NULL;
2908       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2909       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2910         larger_chunk = free_chunks(larger_chunk_index)->head();
2911         if (larger_chunk == NULL) {
2912           larger_chunk_index = next_chunk_index(larger_chunk_index);
2913         }
2914       }
2915 
2916       if (larger_chunk != NULL) {
2917         assert(larger_chunk->word_size() > word_size, "Sanity");
2918         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2919 
2920         // We found a larger chunk. Lets split it up:
2921         // - remove old chunk
2922         // - in its place, create new smaller chunks, with at least one chunk
2923         //   being of target size, the others sized as large as possible. This
2924         //   is to make sure the resulting chunks are "as coalesced as possible"
2925         //   (similar to VirtualSpaceNode::retire()).
2926         // Note: during this operation both ChunkManager and VirtualSpaceNode
2927         //  are temporarily invalid, so be careful with asserts.
2928 
2929         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2930            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2931           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2932           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2933 
2934         chunk = split_chunk(word_size, larger_chunk);
2935 
2936         // This should have worked.
2937         assert(chunk != NULL, "Sanity");
2938         assert(chunk->word_size() == word_size, "Sanity");
2939         assert(chunk->is_tagged_free(), "Sanity");
2940 
2941         we_did_split_a_chunk = true;
2942 
2943       }
2944     }
2945 
2946     if (chunk == NULL) {
2947       return NULL;
2948     }
2949 
2950     // Remove the chunk as the head of the list.
2951     free_list->remove_chunk(chunk);
2952 
2953     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2954                                        p2i(free_list), free_list->count());
2955 
2956   } else {
2957     chunk = humongous_dictionary()->get_chunk(word_size);
2958 
2959     if (chunk == NULL) {
2960       return NULL;
2961     }
2962 
2963     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2964                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2965   }
2966 
2967   // Chunk has been removed from the chunk manager; update counters.
2968   account_for_removed_chunk(chunk);
2969   do_update_in_use_info_for_chunk(chunk, true);
2970   chunk->container()->inc_container_count();
2971   chunk->inc_use_count();
2972 
2973   // Remove the chunk's links to this freelist.
2974   chunk->set_next(NULL);
2975   chunk->set_prev(NULL);
2976 
2977   // Run some verifications (some more if we did a chunk split)
2978 #ifdef ASSERT
2979   if (VerifyMetaspace) {
2980     locked_verify();
2981     VirtualSpaceNode* const vsn = chunk->container();
2982     vsn->verify();
2983     if (we_did_split_a_chunk) {
2984       vsn->verify_free_chunks_are_ideally_merged();
2985     }
2986   }
2987 #endif
2988 
2989   return chunk;
2990 }
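
// Illustrative sketch, not part of metaspace.cpp: the lookup order used by
// free_chunks_get() for a non-humongous request -- exact-size freelist first,
// then the smallest larger freelist whose head can be split. The freelist
// contents below are made up for the example.
#include <cstdio>

int main() {
  const char* names[3] = { "specialized", "small", "medium" };
  int free_count[3]    = { 0, 0, 2 };     // assumed state: only medium chunks are free
  int wanted = 0;                         // request a specialized chunk
  int i = wanted;
  while (i < 3 && free_count[i] == 0) {   // walk upwards to the next non-empty list
    i++;
  }
  if (i == wanted) {
    printf("took a %s chunk directly from its freelist\n", names[i]);
  } else if (i < 3) {
    printf("no free %s chunks; split a %s chunk and take the target piece\n",
           names[wanted], names[i]);
  } else {
    printf("no suitable chunk; the caller must grow the virtual space list\n");
  }
  return 0;
}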
2991 
2992 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2993   assert_lock_strong(MetaspaceExpand_lock);
2994   slow_locked_verify();
2995 
2996   // Take from the beginning of the list
2997   Metachunk* chunk = free_chunks_get(word_size);
2998   if (chunk == NULL) {
2999     return NULL;
3000   }
3001 
3002   assert((word_size <= chunk->word_size()) ||
3003          (list_index(chunk->word_size()) == HumongousIndex),
3004          "Non-humongous variable sized chunk");
3005   LogTarget(Debug, gc, metaspace, freelist) lt;
3006   if (lt.is_enabled()) {
3007     size_t list_count;
3008     if (list_index(word_size) < HumongousIndex) {
3009       ChunkList* list = find_free_chunks_list(word_size);
3010       list_count = list->count();
3011     } else {
3012       list_count = humongous_dictionary()->total_count();
3013     }
3014     LogStream ls(lt);
3015     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3016              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3017     ResourceMark rm;
3018     locked_print_free_chunks(&ls);
3019   }
3020 
3021   return chunk;
3022 }
3023 
3024 void ChunkManager::return_single_chunk(Metachunk* chunk) {
3025   assert_lock_strong(MetaspaceExpand_lock);
3026   assert(chunk != NULL, "Expected chunk.");
3027   assert(chunk->container() != NULL, "Container should have been set.");
3028   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3029   DEBUG_ONLY(do_verify_chunk(chunk);)
3030   const ChunkIndex index = chunk->get_chunk_type();
3031   index_bounds_check(index);
3032 
3033   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3034   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3035   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3036   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3037 
3038   if (index != HumongousIndex) {
3039     // Return non-humongous chunk to freelist.
3040     ChunkList* list = free_chunks(index);
3041     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3042     list->return_chunk_at_head(chunk);
3043     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3044         chunk_size_name(index), p2i(chunk));
3045   } else {
3046     // Return humongous chunk to dictionary.
3047     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3048     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3049            "Humongous chunk has wrong alignment.");
3050     _humongous_dictionary.return_chunk(chunk);
3051     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3052         chunk_size_name(index), p2i(chunk), chunk->word_size());
3053   }
3054   chunk->container()->dec_container_count();
3055   do_update_in_use_info_for_chunk(chunk, false);
3056 
3057   // Chunk has been added; update counters.
3058   account_for_added_chunk(chunk);
3059 
3060   // Attempt to coalesce the returned chunk with its neighboring chunks:
3061   // if this chunk is small or specialized, attempt to coalesce it into a medium chunk.
3062   if (index == SmallIndex || index == SpecializedIndex) {
3063     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3064       // That did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3065       if (index == SpecializedIndex) {
3066         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3067           // give up.
3068         }
3069       }
3070     }
3071   }
3072 
3073 }
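
// Illustrative sketch, not part of metaspace.cpp: the order of coalescing
// attempts made when a chunk comes back to the freelist, mirroring the logic
// in return_single_chunk() above. attempt() is a stand-in that reports a
// canned result instead of inspecting an occupancy map.
#include <cstdio>

enum ChunkKind { SpecializedKind, SmallKind, MediumKind, HumongousKind };

static bool attempt(const char* level, bool succeeds) {
  printf("attempt to coalesce neighbors into a %s chunk: %s\n",
         level, succeeds ? "ok" : "failed");
  return succeeds;
}

static void after_return(ChunkKind kind) {
  if (kind == SmallKind || kind == SpecializedKind) {
    if (!attempt("medium", false) && kind == SpecializedKind) {
      attempt("small", true);   // only a specialized chunk gets this second chance
    }
  }
}

int main() {
  after_return(SpecializedKind);
  return 0;
}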
3074 
3075 void ChunkManager::return_chunk_list(Metachunk* chunks) {
3076   if (chunks == NULL) {
3077     return;
3078   }
3079   LogTarget(Trace, gc, metaspace, freelist) log;
3080   if (log.is_enabled()) { // tracing
3081     log.print("returning list of chunks...");
3082   }
3083   unsigned num_chunks_returned = 0;
3084   size_t size_chunks_returned = 0;
3085   Metachunk* cur = chunks;
3086   while (cur != NULL) {
3087     // Capture the next link before it is changed
3088     // by the call to return_chunk_at_head();
3089     Metachunk* next = cur->next();
3090     if (log.is_enabled()) { // tracing
3091       num_chunks_returned ++;
3092       size_chunks_returned += cur->word_size();
3093     }
3094     return_single_chunk(cur);
3095     cur = next;
3096   }
3097   if (log.is_enabled()) { // tracing
3098     log.print("returned %u chunks to freelist, total word size " SIZE_FORMAT ".",
3099         num_chunks_returned, size_chunks_returned);
3100   }
3101 }
3102 
3103 void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
3104   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3105   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3106     out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
3107   }
3108 }
3109 
3110 // SpaceManager methods
3111 
3112 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3113   size_t chunk_sizes[] = {
3114       specialized_chunk_size(is_class_space),
3115       small_chunk_size(is_class_space),
3116       medium_chunk_size(is_class_space)
3117   };
3118 
3119   // Adjust up to one of the fixed chunk sizes ...
3120   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3121     if (requested <= chunk_sizes[i]) {
3122       return chunk_sizes[i];
3123     }
3124   }
3125 
3126   // ... or return the size as a humongous chunk.
3127   return requested;
3128 }
3129 
3130 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3131   return adjust_initial_chunk_size(requested, is_class());
3132 }
3133 
3134 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3135   size_t requested;
3136 
3137   if (is_class()) {
3138     switch (type) {
3139     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3140     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3141     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3142     default:                                 requested = ClassSmallChunk; break;
3143     }
3144   } else {
3145     switch (type) {
3146     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3147     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3148     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3149     default:                                 requested = SmallChunk; break;
3150     }
3151   }
3152 
3153   // Adjust to one of the fixed chunk sizes (unless humongous)
3154   const size_t adjusted = adjust_initial_chunk_size(requested);
3155 
3156   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3157          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3158 
3159   return adjusted;
3160 }
3161 
3162 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3163 
3164   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3165     st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
3166         num_chunks_by_type(i), chunk_size_name(i));
3167   }
3168 
3169   chunk_manager()->locked_print_free_chunks(st);
3170 }
3171 
3172 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3173 
3174   // Decide between a small chunk and a medium chunk.  Up to
3175   // _small_chunk_limit small chunks can be allocated.
3176   // After that a medium chunk is preferred.
3177   size_t chunk_word_size;
3178 
3179   // Special case for anonymous metadata space.
3180   // Anonymous metadata space is usually small, with the majority in the 1K - 2K range and
3181   // rarely around 4K (on a 64-bit JVM).
3182   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping allocations
3183   // at SpecializedChunk size up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3184   // reduces space waste from 60+% to around 30%.
3185   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3186       _mdtype == Metaspace::NonClassType &&
3187       num_chunks_by_type(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3188       word_size + Metachunk::overhead() <= SpecializedChunk) {
3189     return SpecializedChunk;
3190   }
3191 
3192   if (num_chunks_by_type(MediumIndex) == 0 &&
3193       num_chunks_by_type(SmallIndex) < _small_chunk_limit) {
3194     chunk_word_size = (size_t) small_chunk_size();
3195     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3196       chunk_word_size = medium_chunk_size();
3197     }
3198   } else {
3199     chunk_word_size = medium_chunk_size();
3200   }
3201 
3202   // Might still need a humongous chunk.  Enforce
3203   // humongous allocation sizes to be aligned up to
3204   // the smallest chunk size.
3205   size_t if_humongous_sized_chunk =
3206     align_up(word_size + Metachunk::overhead(),
3207                   smallest_chunk_size());
3208   chunk_word_size =
3209     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3210 
3211   assert(!SpaceManager::is_humongous(word_size) ||
3212          chunk_word_size == if_humongous_sized_chunk,
3213          "Size calculation is wrong, word_size " SIZE_FORMAT
3214          " chunk_word_size " SIZE_FORMAT,
3215          word_size, chunk_word_size);
3216   Log(gc, metaspace, alloc) log;
3217   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3218     log.debug("Metadata humongous allocation:");
3219     log.debug("  word_size " PTR_FORMAT, word_size);
3220     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3221     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3222   }
3223   return chunk_word_size;
3224 }
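
// Illustrative sketch, not part of metaspace.cpp: the chunk-size policy of
// calc_chunk_size() for a non-class space manager, reduced to a standalone
// function. The word sizes (128/512/8192), the overhead of 2 words and the
// limit of 4 chunks per type are assumed example values, not HotSpot constants.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static const size_t spec_sz = 128, small_sz = 512, medium_sz = 8192, overhead = 2;

static size_t pick_chunk(size_t word_size, bool anon_loader,
                         size_t spec_chunks, size_t small_chunks, size_t medium_chunks) {
  // anonymous/reflection loaders keep using specialized chunks for a while
  if (anon_loader && spec_chunks < 4 && word_size + overhead <= spec_sz) {
    return spec_sz;
  }
  size_t chosen = medium_sz;
  if (medium_chunks == 0 && small_chunks < 4) {
    chosen = (word_size + overhead > small_sz) ? medium_sz : small_sz;
  }
  // humongous requests are rounded up to a multiple of the smallest chunk size
  size_t humongous = ((word_size + overhead + spec_sz - 1) / spec_sz) * spec_sz;
  return std::max(chosen, humongous);
}

int main() {
  printf("%zu\n", pick_chunk(100,  true,  0, 0, 0));   // 128:  stays on specialized chunks
  printf("%zu\n", pick_chunk(100,  false, 0, 0, 0));   // 512:  fits a small chunk
  printf("%zu\n", pick_chunk(600,  false, 0, 0, 0));   // 8192: too big for a small chunk
  printf("%zu\n", pick_chunk(9000, false, 0, 0, 0));   // 9088: humongous, aligned to 128
  return 0;
}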
3225 
3226 void SpaceManager::track_metaspace_memory_usage() {
3227   if (is_init_completed()) {
3228     if (is_class()) {
3229       MemoryService::track_compressed_class_memory_usage();
3230     }
3231     MemoryService::track_metaspace_memory_usage();
3232   }
3233 }
3234 
3235 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3236   assert_lock_strong(_lock);
3237   assert(vs_list()->current_virtual_space() != NULL,
3238          "Should have been set");
3239   assert(current_chunk() == NULL ||
3240          current_chunk()->allocate(word_size) == NULL,
3241          "Don't need to expand");
3242   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3243 
3244   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3245     size_t words_left = 0;
3246     size_t words_used = 0;
3247     if (current_chunk() != NULL) {
3248       words_left = current_chunk()->free_word_size();
3249       words_used = current_chunk()->used_word_size();
3250     }
3251     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3252                                        word_size, words_used, words_left);
3253   }
3254 
3255   // Get another chunk
3256   size_t chunk_word_size = calc_chunk_size(word_size);
3257   Metachunk* next = get_new_chunk(chunk_word_size);
3258 
3259   MetaWord* mem = NULL;
3260 
3261   // If a chunk was available, add it to the in-use chunk list
3262   // and do an allocation from it.
3263   if (next != NULL) {
3264     // Add to this manager's list of chunks in use.
3265     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3266     // case it usually makes no sense to make it the current chunk, since the next allocation would
3267     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3268     // good chunk which could be used for more normal allocations.
3269     bool make_current = true;
3270     if (next->get_chunk_type() == HumongousIndex &&
3271         current_chunk() != NULL) {
3272       make_current = false;
3273     }
3274     add_chunk(next, make_current);
3275     mem = next->allocate(word_size);
3276   }
3277 
3278   // Track metaspace memory usage statistic.
3279   track_metaspace_memory_usage();
3280 
3281   return mem;
3282 }
3283 
3284 void SpaceManager::print_on(outputStream* st) const {
3285   SpaceManagerStatistics stat;
3286   add_to_statistics(&stat); // will lock _lock.
3287   stat.print_on(st, 1*K, false);
3288 }
3289 
3290 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3291                            Metaspace::MetaspaceType space_type,
3292                            Mutex* lock) :
3293   _mdtype(mdtype),
3294   _space_type(space_type),
3295   _capacity_words(0),
3296   _used_words(0),
3297   _overhead_words(0),
3298   _block_freelists(NULL),
3299   _lock(lock),
3300   _chunk_list(NULL),
3301   _current_chunk(NULL)
3302 {
3303   Metadebug::init_allocation_fail_alot_count();
3304   memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
3305   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3306 }
3307 
3308 void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {
3309 
3310   assert_lock_strong(MetaspaceExpand_lock);
3311 
3312   _capacity_words += new_chunk->word_size();
3313   _overhead_words += Metachunk::overhead();
3314   DEBUG_ONLY(new_chunk->verify());
3315   _num_chunks_by_type[new_chunk->get_chunk_type()] ++;
3316 
3317   // Adjust global counters:
3318   MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
3319   MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
3320 }
3321 
3322 void SpaceManager::account_for_allocation(size_t words) {
3323   // Note: we should be locked with the ClassLoaderData-specific metaspace lock.
3324   // We may or may not be locked with the global metaspace expansion lock.
3325   assert_lock_strong(lock());
3326 
3327   // Add to the per SpaceManager totals. This can be done non-atomically.
3328   _used_words += words;
3329 
3330   // Adjust global counters. This will be done atomically.
3331   MetaspaceUtils::inc_used(mdtype(), words);
3332 }
3333 
3334 void SpaceManager::account_for_spacemanager_death() {
3335 
3336   assert_lock_strong(MetaspaceExpand_lock);
3337 
3338   MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
3339   MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
3340   MetaspaceUtils::dec_used(mdtype(), _used_words);
3341 }
3342 
3343 SpaceManager::~SpaceManager() {
3344 
3345   // This takes this->_lock, which must not be done while holding MetaspaceExpand_lock.
3346   DEBUG_ONLY(verify_metrics());
3347 
3348   MutexLockerEx fcl(MetaspaceExpand_lock,
3349                     Mutex::_no_safepoint_check_flag);
3350 
3351   chunk_manager()->slow_locked_verify();
3352 
3353   account_for_spacemanager_death();
3354 
3355   Log(gc, metaspace, freelist) log;
3356   if (log.is_trace()) {
3357     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3358     ResourceMark rm;
3359     LogStream ls(log.trace());
3360     locked_print_chunks_in_use_on(&ls);
3361     if (block_freelists() != NULL) {
3362       block_freelists()->print_on(&ls);
3363     }
3364   }
3365 
3366   // Add all the chunks in use by this space manager
3367   // to the global list of free chunks.
3368 
3369   // Follow each list of chunks-in-use and add them to the
3370   // free lists.  Each list is NULL terminated.
3371   chunk_manager()->return_chunk_list(chunk_list());
3372 #ifdef ASSERT
3373   _chunk_list = NULL;
3374   _current_chunk = NULL;
3375 #endif
3376 
3377   chunk_manager()->slow_locked_verify();
3378 
3379   if (_block_freelists != NULL) {
3380     delete _block_freelists;
3381   }
3382 }
3383 
3384 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3385   assert_lock_strong(lock());
3386   // Allocations and deallocations are in raw_word_size
3387   size_t raw_word_size = get_allocation_word_size(word_size);
3388   // Lazily create a block_freelist
3389   if (block_freelists() == NULL) {
3390     _block_freelists = new BlockFreelist();
3391   }
3392   block_freelists()->return_block(p, raw_word_size);
3393   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
3394 }
3395 
3396 // Adds a chunk to the list of chunks in use.
3397 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3398 
3399   assert_lock_strong(_lock);
3400   assert(new_chunk != NULL, "Should not be NULL");
3401   assert(new_chunk->next() == NULL, "Should not be on a list");
3402 
3403   new_chunk->reset_empty();



3404 
3405   // Find the correct list and set the current
3406   // chunk for that list.
3407   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3408 
3409   if (make_current) {
3410     // If we are to make the chunk current, retire the old current chunk and replace
3411     // it with the new chunk.
3412     retire_current_chunk();
3413     set_current_chunk(new_chunk);
3414   }
3415 
3416   // Add the new chunk at the head of its respective chunk list.
3417   new_chunk->set_next(_chunk_list);
3418   _chunk_list = new_chunk;





3419 
3420   // Adjust counters.
3421   account_for_new_chunk(new_chunk);
3422 
3423   assert(new_chunk->is_empty(), "Not ready for reuse");
3424   Log(gc, metaspace, freelist) log;
3425   if (log.is_trace()) {
3426     log.trace("SpaceManager::added chunk: ");
3427     ResourceMark rm;
3428     LogStream ls(log.trace());
3429     new_chunk->print_on(&ls);
3430     chunk_manager()->locked_print_free_chunks(&ls);
3431   }
3432 }
3433 
3434 void SpaceManager::retire_current_chunk() {
3435   if (current_chunk() != NULL) {
3436     size_t remaining_words = current_chunk()->free_word_size();
3437     if (remaining_words >= SmallBlocks::small_block_min_size()) {
3438       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3439       deallocate(ptr, remaining_words);
3440       account_for_allocation(remaining_words);
3441     }
3442   }
3443 }
3444 
3445 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3446   // Get a chunk from the chunk freelist
3447   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);

3448 
3449   if (next == NULL) {
3450     next = vs_list()->get_new_chunk(chunk_word_size,
3451                                     medium_chunk_bunch());






3452   }
3453 
3454   Log(gc, metaspace, alloc) log;
3455   if (log.is_debug() && next != NULL &&
3456       SpaceManager::is_humongous(next->word_size())) {
3457     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());


3458   }
3459 
3460   return next;
3461 }
3462 
3463 MetaWord* SpaceManager::allocate(size_t word_size) {
3464   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3465   size_t raw_word_size = get_allocation_word_size(word_size);
3466   BlockFreelist* fl =  block_freelists();
3467   MetaWord* p = NULL;
3468 
3469   DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());


3470 
3471   // Allocation from the dictionary is expensive in the sense that
3472   // the dictionary has to be searched for a size.  Don't allocate
3473   // from the dictionary until it starts to get fat.  Is this
3474   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3475   // for allocations.  Do some profiling.  JJJ
3476   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3477     p = fl->get_block(raw_word_size);
3478     if (p != NULL) {
3479       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
3480     }
3481   }
3482   if (p == NULL) {
3483     p = allocate_work(raw_word_size);
3484   }
3485 
3486   return p;
3487 }
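
// Illustrative sketch, not part of metaspace.cpp: the two-step path taken by
// SpaceManager::allocate() -- reuse a previously deallocated block only once
// the block freelist is "fat" enough, otherwise carve from a chunk. The limit
// value and the stand-in struct are assumptions for the example.
#include <cstddef>
#include <cstdio>

struct FakeBlockFreelist { size_t total; };        // stands in for BlockFreelist

static const size_t dictionary_limit = 4 * 1024;   // assumed to mirror allocation_from_dictionary_limit

static const char* allocation_path(const FakeBlockFreelist* fl) {
  if (fl != NULL && fl->total > dictionary_limit) {
    return "try the deallocated-block freelist first, fall back to allocate_work()";
  }
  return "allocate_work(): current chunk, else grow_and_allocate()";
}

int main() {
  FakeBlockFreelist thin = { 512 };
  FakeBlockFreelist fat  = { 8192 };
  printf("thin freelist -> %s\n", allocation_path(&thin));
  printf("fat freelist  -> %s\n", allocation_path(&fat));
  return 0;
}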
3488 
3489 // Returns the address of the space allocated for "word_size".
3490 // This method does not know about blocks (Metablocks).
3491 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3492   assert_lock_strong(lock());
3493 #ifdef ASSERT
3494   if (Metadebug::test_metadata_failure()) {
3495     return NULL;
3496   }
3497 #endif
3498   // Is there space in the current chunk?
3499   MetaWord* result = NULL;
3500 
3501   if (current_chunk() != NULL) {
3502     result = current_chunk()->allocate(word_size);
3503   }







3504 
3505   if (result == NULL) {
3506     result = grow_and_allocate(word_size);
3507   }
3508 
3509   if (result != NULL) {
3510     account_for_allocation(word_size);
3511   }



3512 
3513   return result;
3514 }


3515 
3516 void SpaceManager::verify() {
3517   Metachunk* curr = chunk_list();
3518   while (curr != NULL) {
3519     DEBUG_ONLY(do_verify_chunk(curr);)
3520     assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3521     curr = curr->next();
3522   }
3523 }
3524 
3525 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3526   assert(is_humongous(chunk->word_size()) ||
3527          chunk->word_size() == medium_chunk_size() ||
3528          chunk->word_size() == small_chunk_size() ||
3529          chunk->word_size() == specialized_chunk_size(),
3530          "Chunk size is wrong");
3531   return;
3532 }



3533 
3534 void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
3535   assert_lock_strong(lock());
3536   Metachunk* chunk = chunk_list();
3537   while (chunk != NULL) {
3538     UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type());
3539     chunk_stat.add_num(1);
3540     chunk_stat.add_cap(chunk->word_size());
3541     chunk_stat.add_overhead(Metachunk::overhead());
3542     chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
3543     if (chunk != current_chunk()) {
3544       chunk_stat.add_waste(chunk->free_word_size());
3545     } else {
3546       chunk_stat.add_free(chunk->free_word_size());
3547     }
3548     chunk = chunk->next();
3549   }
3550   if (block_freelists() != NULL) {
3551     out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
3552   }
3553 }
3554 
3555 void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
3556   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3557   add_to_statistics_locked(out);
3558 }


3559 
3560 #ifdef ASSERT
3561 void SpaceManager::verify_metrics_locked() const {
3562   assert_lock_strong(lock());









3563 
3564   SpaceManagerStatistics stat;
3565   add_to_statistics_locked(&stat);
3566 
3567   UsedChunksStatistics chunk_stats = stat.totals();









3568 
3569   DEBUG_ONLY(chunk_stats.check_sanity());
3570 
3571   assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
3572   assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
3573   assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
3574 }










3575 
3576 void SpaceManager::verify_metrics() const {
3577   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3578   verify_metrics_locked();





3579 }
3580 #endif // ASSERT
3581 
3582 
3583 
3584 // MetaspaceUtils
3585 size_t MetaspaceUtils::_capacity_words [Metaspace:: MetadataTypeCount] = {0, 0};
3586 size_t MetaspaceUtils::_overhead_words [Metaspace:: MetadataTypeCount] = {0, 0};
3587 volatile size_t MetaspaceUtils::_used_words [Metaspace:: MetadataTypeCount] = {0, 0};
3588 
3589 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
3590 // output will be the accumulated values for all live metaspaces.
3591 // Note: method does not do any locking.
3592 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
3593   out->reset();
3594   ClassLoaderDataGraphMetaspaceIterator iter;
3595   while (iter.repeat()) {
3596     ClassLoaderMetaspace* msp = iter.get_next();
3597     if (msp != NULL) {
3598       msp->add_to_statistics(out);
3599     }
3600   }
3601 }
3602 


3716                 "reserved "  SIZE_FORMAT "K",
3717                 used_bytes()/K,
3718                 capacity_bytes()/K,
3719                 committed_bytes()/K,
3720                 reserved_bytes()/K);
3721 
3722   if (Metaspace::using_class_space()) {
3723     Metaspace::MetadataType ct = Metaspace::ClassType;
3724     out->print_cr("  class space    "
3725                   "used "      SIZE_FORMAT "K, "
3726                   "capacity "  SIZE_FORMAT "K, "
3727                   "committed " SIZE_FORMAT "K, "
3728                   "reserved "  SIZE_FORMAT "K",
3729                   used_bytes(ct)/K,
3730                   capacity_bytes(ct)/K,
3731                   committed_bytes(ct)/K,
3732                   reserved_bytes(ct)/K);
3733   }
3734 }
3735 
3736 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
3737 private:
3738   outputStream* const _out;
3739   const size_t        _scale;
3740   const bool          _do_print;
3741   const bool          _break_down_by_chunktype;
3742 
3743 public:
3744 
3745   uintx                           _num_loaders;
3746   ClassLoaderMetaspaceStatistics  _stats_total;
3747 
3748   uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
3749   ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];
3750 
3751 public:
3752   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
3753     : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
3754     , _num_loaders(0)
3755   {
3756     memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
3757   }
3758 
3759   void do_cld(ClassLoaderData* cld) {
3760 
3761     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3762 
3763     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
3764     if (msp == NULL) {
3765       return;
3766     }
3767 
3768     // Collect statistics for this class loader metaspace
3769     ClassLoaderMetaspaceStatistics this_cld_stat;
3770     msp->add_to_statistics(&this_cld_stat);
3771 
3772     // And add it to the running totals
3773     _stats_total.add(this_cld_stat);
3774     _num_loaders ++;
3775     _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
3776     _num_loaders_by_spacetype[msp->space_type()] ++;
3777 
3778     // Optionally, print.
3779     if (_do_print) {
3780 
3781       _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders);
3782 
3783       if (cld->is_anonymous()) {
3784         _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));
3785       } else {
3786         ResourceMark rm;
3787         _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
3788       }
3789 
3790       if (cld->is_unloading()) {
3791         _out->print(" (unloading)");
3792       }
3793 
3794       this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
3795       _out->cr();
3796 
3797     }
3798 
3799   } // do_cld
3800 
3801 };
3802 
3803 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
3804   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3805   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3806   {
3807     if (Metaspace::using_class_space()) {
3808       out->print("  Non-class space:  ");
3809     }
3810     print_scaled_words(out, reserved_nonclass_words, scale, 7);
3811     out->print(" reserved, ");
3812     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
3813     out->print_cr(" committed ");
3814 
3815     if (Metaspace::using_class_space()) {
3816       const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3817       const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3818       out->print("      Class space:  ");
3819       print_scaled_words(out, reserved_class_words, scale, 7);
3820       out->print(" reserved, ");
3821       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);


4822 void ClassLoaderMetaspace::verify() {
4823   vsm()->verify();
4824   if (Metaspace::using_class_space()) {
4825     class_vsm()->verify();
4826   }
4827 }
4828 
4829 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
4830   assert_lock_strong(lock());
4831   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
4832   if (Metaspace::using_class_space()) {
4833     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
4834   }
4835 }
4836 
4837 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
4838   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
4839   add_to_statistics_locked(out);
4840 }
4841 
4842 #ifdef ASSERT
4843 static void do_verify_chunk(Metachunk* chunk) {
4844   guarantee(chunk != NULL, "Sanity");
4845   // Verify chunk itself; then verify that it is consistent with the
4846   // occupancy map of its containing node.
4847   chunk->verify();
4848   VirtualSpaceNode* const vsn = chunk->container();
4849   OccupancyMap* const ocmap = vsn->occupancy_map();
4850   ocmap->verify_for_chunk(chunk);
4851 }
4852 #endif
4853 
4854 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4855   chunk->set_is_tagged_free(!inuse);
4856   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4857   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4858 }
4859 
4860 /////////////// Unit tests ///////////////
4861 
4862 #ifndef PRODUCT
4863 
4864 class TestMetaspaceUtilsTest : AllStatic {
4865  public:
4866   static void test_reserved() {
4867     size_t reserved = MetaspaceUtils::reserved_bytes();
4868 
4869     assert(reserved > 0, "assert");
4870 
4871     size_t committed  = MetaspaceUtils::committed_bytes();
4872     assert(committed <= reserved, "assert");
4873 
4874     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
4875     assert(reserved_metadata > 0, "assert");
4876     assert(reserved_metadata <= reserved, "assert");
4877 
4878     if (UseCompressedClassPointers) {
4879       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);


5064     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5065     assert(expanded, "Failed to commit");
5066 
5067     // Calculate a size that will overflow the virtual space size.
5068     void* virtual_space_max = (void*)(uintptr_t)-1;
5069     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5070     size_t overflow_size = bottom_to_max + BytesPerWord;
5071     size_t overflow_word_size = overflow_size / BytesPerWord;
5072 
5073     // Check that is_available can handle the overflow.
5074     assert_is_available_negative(overflow_word_size);
5075   }
5076 
5077   static void test_is_available() {
5078     TestVirtualSpaceNodeTest::test_is_available_positive();
5079     TestVirtualSpaceNodeTest::test_is_available_negative();
5080     TestVirtualSpaceNodeTest::test_is_available_overflow();
5081   }
5082 };
5083 
5084 // The following test is placed here instead of a gtest / unittest file
5085 // because the ChunkManager class is only available in this file.
5086 void ChunkManager_test_list_index() {
5087   {
5088     // Test previous bug where a query for a humongous class metachunk,
5089     // incorrectly matched the non-class medium metachunk size.
5090     {
5091       ChunkManager manager(true);
5092 
5093       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5094 
5095       ChunkIndex index = manager.list_index(MediumChunk);
5096 
5097       assert(index == HumongousIndex,
5098           "Requested size is larger than ClassMediumChunk,"
5099           " so should return HumongousIndex. Got index: %d", (int)index);
5100     }
5101 
5102     // Check the specified sizes as well.
5103     {
5104       ChunkManager manager(true);
5105       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5106       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5107       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5108       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5109     }
5110     {
5111       ChunkManager manager(false);
5112       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5113       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5114       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5115       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5116     }
5117 
5118   }
5119 
5120 }
5121 
5122 #endif // !PRODUCT
5123 
5124 #ifdef ASSERT
5125 
5126 // The following test is placed here instead of a gtest / unittest file
5127 // because the SpaceManager class is only available in this file.
5128 class SpaceManagerTest : AllStatic {
5129   friend void SpaceManager_test_adjust_initial_chunk_size();
5130 
5131   static void test_adjust_initial_chunk_size(bool is_class) {
5132     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5133     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5134     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5135 
5136 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5137     do {                                                                         \
5138       size_t v = value;                                                          \
5139       size_t e = expected;                                                       \
5140       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5141              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5142     } while (0)
5143 
5144     // Smallest (specialized)
5145     test_adjust_initial_chunk_size(1,            smallest, is_class);
5146     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5147     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5148 
5149     // Small
5150     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5151     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5152     test_adjust_initial_chunk_size(normal,       normal, is_class);
5153 
5154     // Medium
5155     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5156     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5157     test_adjust_initial_chunk_size(medium,     medium, is_class);
5158 
5159     // Humongous
5160     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5161 
5162 #undef test_adjust_initial_chunk_size
5163   }
5164 
5165   static void test_adjust_initial_chunk_size() {
5166     test_adjust_initial_chunk_size(false);
5167     test_adjust_initial_chunk_size(true);
5168   }
5169 };
5170 
5171 void SpaceManager_test_adjust_initial_chunk_size() {
5172   SpaceManagerTest::test_adjust_initial_chunk_size();
5173 }
5174 
5175 #endif // ASSERT
5176 
5177 struct chunkmanager_statistics_t {
5178   int num_specialized_chunks;
5179   int num_small_chunks;
5180   int num_medium_chunks;
5181   int num_humongous_chunks;
5182 };
5183 
5184 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5185   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5186   ChunkManagerStatistics stat;
5187   chunk_manager->collect_statistics(&stat);
5188   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
5189   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
5190   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
5191   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
5192 }
5193 
5194 struct chunk_geometry_t {
5195   size_t specialized_chunk_word_size;


   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "aot/aotLoader.hpp"


  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"


  30 #include "memory/filemap.hpp"


  31 #include "memory/metaspace.hpp"
  32 #include "memory/metaspace/chunkManager.hpp"
  33 #include "memory/metaspace/metachunk.hpp"
  34 #include "memory/metaspace/metaspaceCommon.hpp"
  35 #include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
  36 #include "memory/metaspace/spaceManager.hpp"
  37 #include "memory/metaspace/virtualSpaceList.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"




  40 #include "runtime/init.hpp"



  41 #include "runtime/orderAccess.inline.hpp"
  42 #include "services/memTracker.hpp"


  43 #include "utilities/copy.hpp"
  44 #include "utilities/debug.hpp"
  45 #include "utilities/globalDefinitions.hpp"



  46 


  47 
  48 using namespace metaspace::internals;







  49 
  50 MetaWord* last_allocated = 0;
  51 
  52 size_t Metaspace::_compressed_class_space_size;
  53 const MetaspaceTracer* Metaspace::_tracer = NULL;
  54 
  55 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  56 
  57 static const char* space_type_name(Metaspace::MetaspaceType t) {
  58   const char* s = NULL;
  59   switch (t) {
  60     case Metaspace::StandardMetaspaceType: s = "Standard"; break;
  61     case Metaspace::BootMetaspaceType: s = "Boot"; break;
  62     case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
  63     case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
  64     default: ShouldNotReachHere();
  65   }
  66   return s;
  67 }
  68 
  69 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
  70 uint MetaspaceGC::_shrink_factor = 0;
  71 bool MetaspaceGC::_should_concurrent_collect = false;
  72 
  73 // BlockFreelist methods
  74 
  75 // VirtualSpaceNode methods
  76 
  77 // MetaspaceGC methods














  78 
  79 // VM_CollectForMetadataAllocation is the VM operation used to trigger a GC.
  80 // Within the VM operation, after the GC, the attempt to allocate the metadata
  81 // should succeed.  If the GC did not free enough space for the metaspace
  82 // allocation, the HWM is increased so that another virtual space will be
  83 // allocated for the metadata.  With the permanent generation, the growth of
  84 // the perm gen was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.
  85 // The metaspace policy uses those as the small and large steps for the HWM.
  86 //
  87 // After the GC, compute_new_size() for MetaspaceGC is called to resize the
  88 // capacity of the metaspaces.  The current implementation is based on the
  89 // flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio, analogous to the
  90 // flags some GCs use to resize the Java heap.  New flags can be implemented
  91 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
  92 // free space is desirable in the metaspace capacity to decide how much
  93 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
  94 // free space is desirable in the metaspace capacity before decreasing
  95 // the HWM.
  96 
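// (Editorial sketch, not part of the original sources.)  A minimal standalone
// model -- plain C++, not HotSpot code -- of the free-ratio arithmetic described
// above.  The constants are assumptions for illustration only (20 MB in use,
// ratios 40% / 70% standing in for MinMetaspaceFreeRatio / MaxMetaspaceFreeRatio);
// the authoritative logic is MetaspaceGC::compute_new_size() further down.
#if 0
#include <cstdio>

int main() {
  const double used_after_gc  = 20.0 * 1024 * 1024; // assumed: 20 MB committed/in use
  const double min_free_ratio = 40.0;               // stands in for MinMetaspaceFreeRatio
  const double max_free_ratio = 70.0;               // stands in for MaxMetaspaceFreeRatio

  // Smallest capacity that still leaves min_free_ratio percent free:
  const double minimum_desired = used_after_gc / (1.0 - min_free_ratio / 100.0);
  // Largest capacity before more than max_free_ratio percent would be free:
  const double maximum_desired = used_after_gc / (1.0 - max_free_ratio / 100.0);

  std::printf("raise HWM if capacity < %.1f MB, consider shrinking above %.1f MB\n",
              minimum_desired / (1024 * 1024), maximum_desired / (1024 * 1024));
  return 0;
}
#endif
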
  97 // Calculate the amount to increase the high water mark (HWM).
  98 // Increase by a minimum amount (MinMetaspaceExpansion) so that
  99 // another expansion is not requested too soon.  If that is not
 100 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
 101 // If that is still not enough, expand by the size of the allocation
 102 // plus MinMetaspaceExpansion.
 103 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
 104   size_t min_delta = MinMetaspaceExpansion;
 105   size_t max_delta = MaxMetaspaceExpansion;
 106   size_t delta = align_up(bytes, Metaspace::commit_alignment());
 107 
 108   if (delta <= min_delta) {
 109     delta = min_delta;
 110   } else if (delta <= max_delta) {
 111     // Don't want to hit the high water mark on the next
 112     // allocation so make the delta greater than just enough
 113     // for this allocation.
 114     delta = max_delta;
 115   } else {
 116     // This allocation is large but the next ones are probably not
 117     // so increase by the minimum.
 118     delta = delta + min_delta;
 119   }



 120 
 121   assert_is_aligned(delta, Metaspace::commit_alignment());
 122 
 123   return delta;
 124 }
 125 
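// (Editorial note, not part of the original sources.)  Concretely, with assumed
// example values -- MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M and
// a 64K commit alignment (the real values come from the VM flags and from
// Metaspace::commit_alignment()) -- delta_capacity_until_GC() above behaves like this:
//   request   64K -> delta = 256K        (bumped up to the small step)
//   request    1M -> delta =   4M        (bumped up to the large step)
//   request    8M -> delta =   8M + 256K (the aligned request plus the small step)
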
 126 size_t MetaspaceGC::capacity_until_GC() {
 127   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
 128   assert(value >= MetaspaceSize, "Not initialized properly?");
 129   return value;






 130 }
 131 
 132 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
 133   assert_is_aligned(v, Metaspace::commit_alignment());
 134 
 135   intptr_t capacity_until_GC = _capacity_until_GC;
 136   intptr_t new_value = capacity_until_GC + v;

 137 
 138   if (new_value < capacity_until_GC) {
 139     // The addition wrapped around, set new_value to aligned max value.
 140     new_value = align_down(max_uintx, Metaspace::commit_alignment());
 141   }
 142 
 143   intptr_t expected = _capacity_until_GC;
 144   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);

 145 
 146   if (expected != actual) {
 147     return false;



 148   }
 149 
 150   if (new_cap_until_GC != NULL) {
 151     *new_cap_until_GC = new_value;
 152   }
 153   if (old_cap_until_GC != NULL) {
 154     *old_cap_until_GC = capacity_until_GC;
 155   }
 156   return true;
 157 }
 158 
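// (Editorial sketch, not part of the original sources.)  inc_capacity_until_GC()
// returns false when the cmpxchg above loses a race with a concurrent update of
// _capacity_until_GC, so a caller that still needs the extra headroom would
// typically retry; delta_bytes below is a hypothetical, commit-aligned value.
#if 0
size_t after  = 0;
size_t before = 0;
while (!MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before)) {
  // Lost the race; another thread moved the HWM.  Real callers typically also
  // re-try the allocation itself between attempts.
}
#endif
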
 159 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
 160   assert_is_aligned(v, Metaspace::commit_alignment());
 161 
 162   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);








 163 }
 164 
 165 void MetaspaceGC::initialize() {
 166   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
 167   // we can't do a GC during initialization.
 168   _capacity_until_GC = MaxMetaspaceSize;





 169 }
 170 
 171 void MetaspaceGC::post_initialize() {
 172   // Reset the high-water mark once the VM initialization is done.
 173   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
 174 }
 175 
 176 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
 177   // Check if the compressed class space is full.
 178   if (is_class && Metaspace::using_class_space()) {
 179     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
 180     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
 181       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
 182                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
 183       return false;
 184     }
 185   }
 186 
 187   // Check if the user has imposed a limit on the metaspace memory.
 188   size_t committed_bytes = MetaspaceUtils::committed_bytes();
 189   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
 190     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
 191               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
 192     return false;
 193   }
 194 
 195   return true;
 196 }
 197 
 198 size_t MetaspaceGC::allowed_expansion() {
 199   size_t committed_bytes = MetaspaceUtils::committed_bytes();
 200   size_t capacity_until_gc = capacity_until_GC();


 201 
 202   assert(capacity_until_gc >= committed_bytes,
 203          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
 204          capacity_until_gc, committed_bytes);
 205 
 206   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
 207   size_t left_until_GC = capacity_until_gc - committed_bytes;
 208   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
 209   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
 210             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
 211             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);








 212 
 213   return left_to_commit / BytesPerWord;
 214 }
 215 
 216 void MetaspaceGC::compute_new_size() {
 217   assert(_shrink_factor <= 100, "invalid shrink factor");
 218   uint current_shrink_factor = _shrink_factor;
 219   _shrink_factor = 0;







 220 
 221   // Using committed_bytes() for used_after_gc is an overestimation, since the
 222   // chunk free lists are included in committed_bytes() and the memory in an
 223   // un-fragmented chunk free list is available for future allocations.
 224   // However, if the chunk free lists becomes fragmented, then the memory may
 225   // not be available for future allocations and the memory is therefore "in use".
 226   // Including the chunk free lists in the definition of "in use" is therefore
 227   // necessary. Not including the chunk free lists can cause capacity_until_GC to
 228   // shrink below committed_bytes() and this has caused serious bugs in the past.
 229   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
 230   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 231 
 232   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
 233   const double maximum_used_percentage = 1.0 - minimum_free_percentage;

 234 
 235   const double min_tmp = used_after_gc / maximum_used_percentage;
 236   size_t minimum_desired_capacity =
 237     (size_t)MIN2(min_tmp, double(max_uintx));
 238   // Don't shrink less than the initial generation size
 239   minimum_desired_capacity = MAX2(minimum_desired_capacity,
 240                                   MetaspaceSize);
 241 
 242   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
 243   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
 244                            minimum_free_percentage, maximum_used_percentage);
 245   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
 246 








 247 
 248   size_t shrink_bytes = 0;
 249   if (capacity_until_GC < minimum_desired_capacity) {
 250     // If we have less capacity below the metaspace HWM, then
 251     // increment the HWM.
 252     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
 253     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
 254     // Don't expand unless it's significant
 255     if (expand_bytes >= MinMetaspaceExpansion) {
 256       size_t new_capacity_until_GC = 0;
 257       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
 258       assert(succeeded, "Should always successfully increment HWM when at safepoint");
 259 
 260       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
 261                                                new_capacity_until_GC,
 262                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
 263       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
 264                                minimum_desired_capacity / (double) K,
 265                                expand_bytes / (double) K,
 266                                MinMetaspaceExpansion / (double) K,
 267                                new_capacity_until_GC / (double) K);







 268     }
 269     return;

 270   }

 271 
 272   // No expansion, now see if we want to shrink
 273   // We would never want to shrink more than this
 274   assert(capacity_until_GC >= minimum_desired_capacity,
 275          SIZE_FORMAT " >= " SIZE_FORMAT,
 276          capacity_until_GC, minimum_desired_capacity);
 277   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
 278 
 279   // Should shrinking be considered?
 280   if (MaxMetaspaceFreeRatio < 100) {
 281     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
 282     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
 283     const double max_tmp = used_after_gc / minimum_used_percentage;
 284     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
 285     maximum_desired_capacity = MAX2(maximum_desired_capacity,
 286                                     MetaspaceSize);
 287     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
 288                              maximum_free_percentage, minimum_used_percentage);
 289     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
 290                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
 291 
 292     assert(minimum_desired_capacity <= maximum_desired_capacity,
 293            "sanity check");
 294 
 295     if (capacity_until_GC > maximum_desired_capacity) {
 296       // Capacity too large, compute shrinking size
 297       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
 298       // We don't want to shrink all the way back to initSize if people call
 299       // System.gc(), because some programs do that between "phases" and then
 300       // we'd just have to grow the metaspace again for the next phase.  So we
 301       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 302       // on the third call, and 100% by the fourth call.  But if we recompute
 303       // size without shrinking, it goes back to 0%.
 304       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
 305 
 306       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
 307 
 308       assert(shrink_bytes <= max_shrink_bytes,
 309              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
 310              shrink_bytes, max_shrink_bytes);
 311       if (current_shrink_factor == 0) {
 312         _shrink_factor = 10;
 313       } else {
 314         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
 315       }
 316       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
 317                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
 318       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
 319                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
 320     }
 321   }
 322 
 323   // Don't shrink unless it's significant
 324   if (shrink_bytes >= MinMetaspaceExpansion &&
 325       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
 326     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
 327     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
 328                                              new_capacity_until_GC,
 329                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
 330   }
 331 }



 332 
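// (Editorial sketch, not part of the original sources.)  A standalone model --
// plain C++, not HotSpot code -- of the shrink damping in compute_new_size()
// above: the factor steps 0 -> 10 -> 40 -> 100 across successive no-growth
// recomputations, and each step releases that percentage of the current excess
// over maximum_desired_capacity.  The 64 MB starting excess is an assumption,
// and the minimum-step / MetaspaceSize floor checks are omitted for brevity.
#if 0
#include <algorithm>
#include <cstdio>

int main() {
  double excess_mb = 64.0;   // assumed excess of capacity_until_GC over maximum_desired_capacity
  unsigned factor  = 0;      // mirrors _shrink_factor
  for (int call = 1; call <= 4; call++) {
    const double shrink_mb = excess_mb * factor / 100.0;
    excess_mb -= shrink_mb;  // assumes usage stays constant between recomputations
    std::printf("call %d: shrink %.1f MB (factor %u%%), remaining excess %.1f MB\n",
                call, shrink_mb, factor, excess_mb);
    factor = (factor == 0) ? 10 : std::min(factor * 4, 100u);
  }
  return 0;
}
#endif
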
 333 // MetaspaceUtils
 334 size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
 335 size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
 336 volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};
 337 
 338 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
 339 // output will be the accumulated values for all live metaspaces.
 340 // Note: method does not do any locking.
 341 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
 342   out->reset();
 343   ClassLoaderDataGraphMetaspaceIterator iter;
 344   while (iter.repeat()) {
 345     ClassLoaderMetaspace* msp = iter.get_next();
 346     if (msp != NULL) {
 347       msp->add_to_statistics(out);
 348     }
 349   }
 350 }
 351 


 465                 "reserved "  SIZE_FORMAT "K",
 466                 used_bytes()/K,
 467                 capacity_bytes()/K,
 468                 committed_bytes()/K,
 469                 reserved_bytes()/K);
 470 
 471   if (Metaspace::using_class_space()) {
 472     Metaspace::MetadataType ct = Metaspace::ClassType;
 473     out->print_cr("  class space    "
 474                   "used "      SIZE_FORMAT "K, "
 475                   "capacity "  SIZE_FORMAT "K, "
 476                   "committed " SIZE_FORMAT "K, "
 477                   "reserved "  SIZE_FORMAT "K",
 478                   used_bytes(ct)/K,
 479                   capacity_bytes(ct)/K,
 480                   committed_bytes(ct)/K,
 481                   reserved_bytes(ct)/K);
 482   }
 483 }
 484 
 485 
 486 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
 487   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
 488   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
 489   {
 490     if (Metaspace::using_class_space()) {
 491       out->print("  Non-class space:  ");
 492     }
 493     print_scaled_words(out, reserved_nonclass_words, scale, 7);
 494     out->print(" reserved, ");
 495     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
 496     out->print_cr(" committed ");
 497 
 498     if (Metaspace::using_class_space()) {
 499       const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
 500       const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
 501       out->print("      Class space:  ");
 502       print_scaled_words(out, reserved_class_words, scale, 7);
 503       out->print(" reserved, ");
 504       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);


1505 void ClassLoaderMetaspace::verify() {
1506   vsm()->verify();
1507   if (Metaspace::using_class_space()) {
1508     class_vsm()->verify();
1509   }
1510 }
1511 
1512 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
1513   assert_lock_strong(lock());
1514   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
1515   if (Metaspace::using_class_space()) {
1516     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
1517   }
1518 }
1519 
1520 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
1521   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1522   add_to_statistics_locked(out);
1523 }
1524 
1525 /////////////// Unit tests ///////////////
1526 
1527 #ifndef PRODUCT
1528 
1529 class TestMetaspaceUtilsTest : AllStatic {
1530  public:
1531   static void test_reserved() {
1532     size_t reserved = MetaspaceUtils::reserved_bytes();
1533 
1534     assert(reserved > 0, "assert");
1535 
1536     size_t committed  = MetaspaceUtils::committed_bytes();
1537     assert(committed <= reserved, "assert");
1538 
1539     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
1540     assert(reserved_metadata > 0, "assert");
1541     assert(reserved_metadata <= reserved, "assert");
1542 
1543     if (UseCompressedClassPointers) {
1544       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);


1729     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
1730     assert(expanded, "Failed to commit");
1731 
1732     // Calculate a size that will overflow the virtual space size.
1733     void* virtual_space_max = (void*)(uintptr_t)-1;
1734     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
1735     size_t overflow_size = bottom_to_max + BytesPerWord;
1736     size_t overflow_word_size = overflow_size / BytesPerWord;
1737 
1738     // Check that is_available can handle the overflow.
1739     assert_is_available_negative(overflow_word_size);
1740   }
1741 
1742   static void test_is_available() {
1743     TestVirtualSpaceNodeTest::test_is_available_positive();
1744     TestVirtualSpaceNodeTest::test_is_available_negative();
1745     TestVirtualSpaceNodeTest::test_is_available_overflow();
1746   }
1747 };
1748 
1749 #endif // !PRODUCT
1750 
1751 struct chunkmanager_statistics_t {
1752   int num_specialized_chunks;
1753   int num_small_chunks;
1754   int num_medium_chunks;
1755   int num_humongous_chunks;
1756 };
1757 
1758 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
1759   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
1760   ChunkManagerStatistics stat;
1761   chunk_manager->collect_statistics(&stat);
1762   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
1763   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
1764   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
1765   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
1766 }
1767 
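// (Editorial sketch, not part of the original sources.)  The function above is
// given external linkage so that a test living outside this translation unit
// can read the free-chunk counts without pulling in the metaspace internals.
// A consumer could look roughly like this (the surrounding test harness and
// VM setup are assumed):
#if 0
chunkmanager_statistics_t stats;
test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stats);
// stats.num_specialized_chunks .. stats.num_humongous_chunks now hold the
// number of free chunks of each size class in the non-class ChunkManager.
#endif
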
1768 struct chunk_geometry_t {
1769   size_t specialized_chunk_word_size;