1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.inline.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.inline.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspace/metaspaceCommon.hpp"
  37 #include "memory/metaspace/metaspaceStatistics.hpp"
  38 #include "memory/metaspaceGCThresholdUpdater.hpp"
  39 #include "memory/metaspaceShared.hpp"
  40 #include "memory/metaspaceTracer.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "memory/universe.hpp"
  43 #include "runtime/atomic.hpp"
  44 #include "runtime/globals.hpp"
  45 #include "runtime/init.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/mutex.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "runtime/orderAccess.inline.hpp"
  50 #include "services/memTracker.hpp"
  51 #include "services/memoryService.hpp"
  52 #include "utilities/align.hpp"
  53 #include "utilities/copy.hpp"
  54 #include "utilities/debug.hpp"
  55 #include "utilities/globalDefinitions.hpp"
  56 #include "utilities/macros.hpp"
  57 
  58 using namespace metaspace::internals;
  59 
  60 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  61 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  62 
  63 // Helper function that performs a number of consistency checks on a chunk.
  64 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  65 
  66 // Given a Metachunk, update its in-use information (both in the
  67 // chunk and the occupancy map).
  68 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  69 
  70 size_t const allocation_from_dictionary_limit = 4 * K;
  71 
  72 MetaWord* last_allocated = 0;
  73 
  74 size_t Metaspace::_compressed_class_space_size;
  75 const MetaspaceTracer* Metaspace::_tracer = NULL;
  76 
  77 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  78 
  79 // Internal statistics.
  80 #ifdef ASSERT
  81 static struct {
  82   // Number of allocations.
  83   uintx num_allocs;
  84   // Number of times a ClassLoaderMetaspace was born...
  85   uintx num_metaspace_births;
  86   // ... and died.
  87   uintx num_metaspace_deaths;
  88   // Number of times VirtualSpaceListNodes were created...
  89   uintx num_vsnodes_created;
  90   // ... and purged.
  91   uintx num_vsnodes_purged;
  92   // Number of times we expanded the committed section of the space.
  93   uintx num_committed_space_expanded;
  94   // Number of deallocations
  95   uintx num_deallocs;
  96   // Number of deallocations triggered from outside ("real" deallocations).
  97   uintx num_external_deallocs;
  98   // Number of times an allocation was satisfied from deallocated blocks.
  99   uintx num_allocs_from_deallocated_blocks;
 100 } g_internal_statistics;
 101 #endif
 102 
 103 enum ChunkSizes {    // in words.
 104   ClassSpecializedChunk = 128,
 105   SpecializedChunk = 128,
 106   ClassSmallChunk = 256,
 107   SmallChunk = 512,
 108   ClassMediumChunk = 4 * K,
 109   MediumChunk = 8 * K
 110 };
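     // For orientation (assuming a 64-bit VM, i.e. BytesPerWord == 8):
     // SpecializedChunk and ClassSpecializedChunk are 128 words == 1 KB,
     // ClassSmallChunk is 256 words == 2 KB, SmallChunk is 512 words == 4 KB,
     // ClassMediumChunk is 4 K words == 32 KB and MediumChunk is 8 K words == 64 KB.
     // On a 32-bit VM the byte sizes are half of these.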
 111 
 112 // Returns size of this chunk type.
 113 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
 114   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
 115   size_t size = 0;
 116   if (is_class) {
 117     switch(chunktype) {
 118       case SpecializedIndex: size = ClassSpecializedChunk; break;
 119       case SmallIndex: size = ClassSmallChunk; break;
 120       case MediumIndex: size = ClassMediumChunk; break;
 121       default:
 122         ShouldNotReachHere();
 123     }
 124   } else {
 125     switch(chunktype) {
 126       case SpecializedIndex: size = SpecializedChunk; break;
 127       case SmallIndex: size = SmallChunk; break;
 128       case MediumIndex: size = MediumChunk; break;
 129       default:
 130         ShouldNotReachHere();
 131     }
 132   }
 133   return size;
 134 }
 135 
 136 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 137   if (is_class) {
 138     if (size == ClassSpecializedChunk) {
 139       return SpecializedIndex;
 140     } else if (size == ClassSmallChunk) {
 141       return SmallIndex;
 142     } else if (size == ClassMediumChunk) {
 143       return MediumIndex;
 144     } else if (size > ClassMediumChunk) {
 145       // A valid humongous chunk size is a multiple of the smallest chunk size.
 146       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 147       return HumongousIndex;
 148     }
 149   } else {
 150     if (size == SpecializedChunk) {
 151       return SpecializedIndex;
 152     } else if (size == SmallChunk) {
 153       return SmallIndex;
 154     } else if (size == MediumChunk) {
 155       return MediumIndex;
 156     } else if (size > MediumChunk) {
 157       // A valid humongous chunk size is a multiple of the smallest chunk size.
 158       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 159       return HumongousIndex;
 160     }
 161   }
 162   ShouldNotReachHere();
 163   return (ChunkIndex)-1;
 164 }
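     // To illustrate the mapping above (non-class space): a size of 512 words
     // yields SmallIndex, while 16 * K words yields HumongousIndex, because it
     // exceeds MediumChunk (8 * K) and is a multiple of SpecializedChunk (128).
     // Sizes matching neither an exact chunk size nor a valid humongous size
     // end up in ShouldNotReachHere().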
 165 
 166 ChunkIndex next_chunk_index(ChunkIndex i) {
 167   assert(i < NumberOfInUseLists, "Out of bound");
 168   return (ChunkIndex) (i+1);
 169 }
 170 
 171 ChunkIndex prev_chunk_index(ChunkIndex i) {
 172   assert(i > ZeroIndex, "Out of bound");
 173   return (ChunkIndex) (i-1);
 174 }
 175 
 176 static const char* space_type_name(Metaspace::MetaspaceType t) {
 177   const char* s = NULL;
 178   switch (t) {
 179     case Metaspace::StandardMetaspaceType: s = "Standard"; break;
 180     case Metaspace::BootMetaspaceType: s = "Boot"; break;
 181     case Metaspace::AnonymousMetaspaceType: s = "Anonymous"; break;
 182     case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
 183     default: ShouldNotReachHere();
 184   }
 185   return s;
 186 }
 187 
 188 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 189 uint MetaspaceGC::_shrink_factor = 0;
 190 bool MetaspaceGC::_should_concurrent_collect = false;
 191 
 192 
 193 typedef class FreeList<Metachunk> ChunkList;
 194 
 195 // Manages the global free lists of chunks.
 196 class ChunkManager : public CHeapObj<mtInternal> {
 197   friend class TestVirtualSpaceNodeTest;
 198 
 199   // Free list of chunks of different sizes.
 200   //   SpecializedChunk
 201   //   SmallChunk
 202   //   MediumChunk
 203   ChunkList _free_chunks[NumberOfFreeLists];
 204 
 205   // Whether or not this is the ChunkManager for class space metadata.
 206   const bool _is_class;
 207 
 208   // Return non-humongous chunk list by its index.
 209   ChunkList* free_chunks(ChunkIndex index);
 210 
 211   // Returns non-humongous chunk list for the given chunk word size.
 212   ChunkList* find_free_chunks_list(size_t word_size);
 213 
 214   //   HumongousChunk
 215   ChunkTreeDictionary _humongous_dictionary;
 216 
 217   // Returns the humongous chunk dictionary.
 218   ChunkTreeDictionary* humongous_dictionary() {
 219     return &_humongous_dictionary;
 220   }
 221 
 222   // Size, in metaspace words, of all chunks managed by this ChunkManager
 223   size_t _free_chunks_total;
 224   // Number of chunks in this ChunkManager
 225   size_t _free_chunks_count;
 226 
 227   // Update counters after a chunk was added or removed.
 228   void account_for_added_chunk(const Metachunk* c);
 229   void account_for_removed_chunk(const Metachunk* c);
 230 
 231   // Debug support
 232 
 233   size_t sum_free_chunks();
 234   size_t sum_free_chunks_count();
 235 
 236   void locked_verify_free_chunks_total();
 237   void slow_locked_verify_free_chunks_total() {
 238     if (VerifyMetaspace) {
 239       locked_verify_free_chunks_total();
 240     }
 241   }
 242   void locked_verify_free_chunks_count();
 243   void slow_locked_verify_free_chunks_count() {
 244     if (VerifyMetaspace) {
 245       locked_verify_free_chunks_count();
 246     }
 247   }
 248   void verify_free_chunks_count();
 249 
 250   // Given a pointer to a chunk, attempts to merge it with neighboring
 251   // free chunks to form a bigger chunk. Returns true if successful.
 252   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 253 
 254   // Helper for chunk merging:
 255   //  Given an address range with 1-n chunks which are all supposed to be
 256   //  free and hence currently managed by this ChunkManager, remove them
 257   //  from this ChunkManager and mark them as invalid.
 258   // - This does not correct the occupancy map.
 259   // - This does not adjust the counters in ChunkManager.
 260   // - This does not adjust the container count in the containing VirtualSpaceNode.
 261   // Returns number of chunks removed.
 262   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 263 
 264   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 265   // split up the larger chunk into n smaller chunks, at least one of which should be
 266   // the target chunk of target chunk size. The smaller chunks, including the target
 267   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 268   // Note that this chunk is supposed to be removed from the freelist right away.
 269   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
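       // For illustration (non-class space): split_chunk(SpecializedChunk, c) on a
       // free 8 K-word MediumChunk c returns a 128-word target chunk and leaves the
       // remainder in the freelist as smaller free chunks.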
 270 
 271  public:
 272 
 273   ChunkManager(bool is_class)
 274       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 275     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 276     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 277     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 278   }
 279 
 280   // Allocate a chunk of the given word size from the global freelist (the chunk is removed from the freelist).
 281   Metachunk* chunk_freelist_allocate(size_t word_size);
 282 
 283   // Map a size to a list index assuming that there are lists
 284   // for specialized, small, medium, and humongous chunks.
 285   ChunkIndex list_index(size_t size);
 286 
 287   // Map a given index to the chunk size.
 288   size_t size_by_index(ChunkIndex index) const;
 289 
 290   bool is_class() const { return _is_class; }
 291 
 292   // Convenience accessors.
 293   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 294   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 295   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 296 
 297   // Take a chunk from the ChunkManager. The chunk is expected to be in
 298   // the chunk manager (the freelist if non-humongous, the dictionary if
 299   // humongous).
 300   void remove_chunk(Metachunk* chunk);
 301 
 302   // Return a single chunk of type index to the ChunkManager.
 303   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 304 
 305   // Add the simple linked list of chunks to the freelist of chunks
 306   // of type index.
 307   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 308 
 309   // Total of the space in the free chunks list
 310   size_t free_chunks_total_words();
 311   size_t free_chunks_total_bytes();
 312 
 313   // Number of chunks in the free chunks list
 314   size_t free_chunks_count();
 315 
 316   // Remove from a list by size.  Selects list based on size of chunk.
 317   Metachunk* free_chunks_get(size_t chunk_word_size);
 318 
 319 #define index_bounds_check(index)                                         \
 320   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 321 
 322   size_t num_free_chunks(ChunkIndex index) const {
 323     index_bounds_check(index);
 324 
 325     if (index == HumongousIndex) {
 326       return _humongous_dictionary.total_free_blocks();
 327     }
 328 
 329     ssize_t count = _free_chunks[index].count();
 330     return count == -1 ? 0 : (size_t) count;
 331   }
 332 
 333   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 334     index_bounds_check(index);
 335 
 336     size_t word_size = 0;
 337     if (index == HumongousIndex) {
 338       word_size = _humongous_dictionary.total_size();
 339     } else {
 340       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 341       word_size = size_per_chunk_in_words * num_free_chunks(index);
 342     }
 343 
 344     return word_size * BytesPerWord;
 345   }
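       // Worked example: if the non-class SmallIndex list holds 3 free chunks,
       // size_free_chunks_in_bytes(SmallIndex) is 3 * 512 words * BytesPerWord,
       // i.e. 12 KB on a 64-bit VM.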
 346 
 347   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 348     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 349                                          num_free_chunks(SmallIndex),
 350                                          num_free_chunks(MediumIndex),
 351                                          num_free_chunks(HumongousIndex),
 352                                          size_free_chunks_in_bytes(SpecializedIndex),
 353                                          size_free_chunks_in_bytes(SmallIndex),
 354                                          size_free_chunks_in_bytes(MediumIndex),
 355                                          size_free_chunks_in_bytes(HumongousIndex));
 356   }
 357 
 358   // Debug support
 359   void verify();
 360   void slow_verify() {
 361     if (VerifyMetaspace) {
 362       verify();
 363     }
 364   }
 365   void locked_verify();
 366   void slow_locked_verify() {
 367     if (VerifyMetaspace) {
 368       locked_verify();
 369     }
 370   }
 371   void verify_free_chunks_total();
 372 
 373   void locked_print_free_chunks(outputStream* st);
 374   void locked_print_sum_free_chunks(outputStream* st);
 375 
 376   void print_on(outputStream* st) const;
 377 
 378   // Fill in current statistic values to the given statistics object.
 379   void collect_statistics(ChunkManagerStatistics* out) const;
 380 
 381 };
 382 
 383 class SmallBlocks : public CHeapObj<mtClass> {
 384   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 385   // Note: this corresponds to the imposed minimum allocation size, see SpaceManager::get_allocation_word_size()
 386   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 387 
 388  private:
 389   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 390 
 391   FreeList<Metablock>& list_at(size_t word_size) {
 392     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 393     return _small_lists[word_size - _small_block_min_size];
 394   }
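       // Indexing example: a request for (_small_block_min_size + 2) words is served
       // from _small_lists[2], whose configured block size is that same word count
       // (see the constructor below).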
 395 
 396  public:
 397   SmallBlocks() {
 398     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 399       uint k = i - _small_block_min_size;
 400       _small_lists[k].set_size(i);
 401     }
 402   }
 403 
 404   // Returns the total size, in words, of all blocks, across all block sizes.
 405   size_t total_size() const {
 406     size_t result = 0;
 407     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 408       uint k = i - _small_block_min_size;
 409       result = result + _small_lists[k].count() * _small_lists[k].size();
 410     }
 411     return result;
 412   }
 413 
 414   // Returns the total number of all blocks across all block sizes.
 415   uintx total_num_blocks() const {
 416     uintx result = 0;
 417     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 418       uint k = i - _small_block_min_size;
 419       result = result + _small_lists[k].count();
 420     }
 421     return result;
 422   }
 423 
 424   static uint small_block_max_size() { return _small_block_max_size; }
 425   static uint small_block_min_size() { return _small_block_min_size; }
 426 
 427   MetaWord* get_block(size_t word_size) {
 428     if (list_at(word_size).count() > 0) {
 429       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 430       return new_block;
 431     } else {
 432       return NULL;
 433     }
 434   }
 435   void return_block(Metablock* free_chunk, size_t word_size) {
 436     list_at(word_size).return_chunk_at_head(free_chunk, false);
 437     assert(list_at(word_size).count() > 0, "Should have a chunk");
 438   }
 439 
 440   void print_on(outputStream* st) const {
 441     st->print_cr("SmallBlocks:");
 442     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 443       uint k = i - _small_block_min_size;
 444       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 445     }
 446   }
 447 };
 448 
 449 // Used to manage the free list of Metablocks (a block corresponds
 450 // to the allocation of a quantum of metadata).
 451 class BlockFreelist : public CHeapObj<mtClass> {
 452   BlockTreeDictionary* const _dictionary;
 453   SmallBlocks* _small_blocks;
 454 
 455   // Only allocate and split from freelist if the size of the allocation
 456   // is at least 1/4th the size of the available block.
 457   const static int WasteMultiplier = 4;
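       // Example: with WasteMultiplier == 4, a free 64-word block is only used for
       // requests of at least 64 / 4 == 16 words; a smaller request would waste more
       // than three quarters of the block, so it is not satisfied from this block.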
 458 
 459   // Accessors
 460   BlockTreeDictionary* dictionary() const { return _dictionary; }
 461   SmallBlocks* small_blocks() {
 462     if (_small_blocks == NULL) {
 463       _small_blocks = new SmallBlocks();
 464     }
 465     return _small_blocks;
 466   }
 467 
 468  public:
 469   BlockFreelist();
 470   ~BlockFreelist();
 471 
 472   // Get and return a block to the free list
 473   MetaWord* get_block(size_t word_size);
 474   void return_block(MetaWord* p, size_t word_size);
 475 
 476   // Returns the total size, in words, of all blocks kept in this structure.
 477   size_t total_size() const  {
 478     size_t result = dictionary()->total_size();
 479     if (_small_blocks != NULL) {
 480       result = result + _small_blocks->total_size();
 481     }
 482     return result;
 483   }
 484 
 485   // Returns the number of all blocks kept in this structure.
 486   uintx num_blocks() const {
 487     uintx result = dictionary()->total_free_blocks();
 488     if (_small_blocks != NULL) {
 489       result = result + _small_blocks->total_num_blocks();
 490     }
 491     return result;
 492   }
 493 
 494   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 495   void print_on(outputStream* st) const;
 496 };
 497 
 498 // Helper for Occupancy Bitmap. A type trait providing an all-bits-are-one unsigned constant.
 499 template <typename T> struct all_ones  { static const T value; };
 500 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 501 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 502 
 503 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 504 // keeps information about
 505 // - where a chunk starts
 506 // - whether a chunk is in-use or free
 507 // Each bit in this bitmap represents one memory range the size of the
 508 // smallest chunk (SpecializedChunk or ClassSpecializedChunk).
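     // A sizing example: for a node covering 256 * K words with a smallest chunk
     // size of 128 words, each layer holds 256 * K / 128 == 2048 bits, i.e. 256
     // bytes, so both layers together cost 512 bytes of native memory.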
 509 class OccupancyMap : public CHeapObj<mtInternal> {
 510 
 511   // The address range this map covers.
 512   const MetaWord* const _reference_address;
 513   const size_t _word_size;
 514 
 515   // The word size of a specialized chunk, aka the number of words one
 516   // bit in this map represents.
 517   const size_t _smallest_chunk_word_size;
 518 
 519   // map data
 520   // Data are organized in two bit layers:
 521   // The first layer is the chunk-start-map. Here, a bit is set to mark
 522   // the corresponding region as the head of a chunk.
 523   // The second layer is the in-use-map. Here, a set bit indicates that
 524   // the corresponding region belongs to a chunk which is in use.
 525   uint8_t* _map[2];
 526 
 527   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 528 
 529   // length, in bytes, of bitmap data
 530   size_t _map_size;
 531 
 532   // Returns true if bit at position pos at bit-layer layer is set.
 533   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 534     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 535     const unsigned byteoffset = pos / 8;
 536     assert(byteoffset < _map_size,
 537            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 538     const unsigned mask = 1 << (pos % 8);
 539     return (_map[layer][byteoffset] & mask) > 0;
 540   }
 541 
 542   // Changes bit at position pos at bit-layer layer to value v.
 543   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 544     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 545     const unsigned byteoffset = pos / 8;
 546     assert(byteoffset < _map_size,
 547            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 548     const unsigned mask = 1 << (pos % 8);
 549     if (v) {
 550       _map[layer][byteoffset] |= mask;
 551     } else {
 552       _map[layer][byteoffset] &= ~mask;
 553     }
 554   }
 555 
 556   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 557   // pos is 32/64 aligned and num_bits is 32/64.
 558   // This is the typical case when coalescing to medium chunks, whose size is
 559   // 32 or 64 times the specialized chunk size (class and non-class case,
 560   // respectively), so they occupy 32 or 64 bits which are 32/64-bit aligned,
 561   // because chunks are chunk-size aligned.
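       // (For reference: ClassMediumChunk / ClassSpecializedChunk == 4 * K / 128 == 32,
       // and MediumChunk / SpecializedChunk == 8 * K / 128 == 64.)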
 562   template <typename T>
 563   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 564     assert(_map_size > 0, "not initialized");
 565     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 566     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 567     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 568     const size_t byteoffset = pos / 8;
 569     assert(byteoffset <= (_map_size - sizeof(T)),
 570            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 571     const T w = *(T*)(_map[layer] + byteoffset);
 572     return w != 0;
 573   }
 574 
 575   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 576   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 577     if (pos % 32 == 0 && num_bits == 32) {
 578       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 579     } else if (pos % 64 == 0 && num_bits == 64) {
 580       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 581     } else {
 582       for (unsigned n = 0; n < num_bits; n ++) {
 583         if (get_bit_at_position(pos + n, layer)) {
 584           return true;
 585         }
 586       }
 587     }
 588     return false;
 589   }
 590 
 591   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 592   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 593     assert(word_size % _smallest_chunk_word_size == 0,
 594         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 595     const unsigned pos = get_bitpos_for_address(p);
 596     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 597     return is_any_bit_set_in_region(pos, num_bits, layer);
 598   }
 599 
 600   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 601   // pos is 32/64 aligned and num_bits is 32/64.
 602   // This is the typical case when coalescing to medium chunks, whose size
 603   // is 32 or 64 times the specialized chunk size (depending on class or non
 604   // class case), so they occupy 64 bits which should be 64bit aligned,
 605   // because chunks are chunk-size aligned.
 606   template <typename T>
 607   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 608     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 609            (unsigned)(sizeof(T) * 8), pos);
 610     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 611            num_bits, (unsigned)(sizeof(T) * 8));
 612     const size_t byteoffset = pos / 8;
 613     assert(byteoffset <= (_map_size - sizeof(T)),
 614            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 615     T* const pw = (T*)(_map[layer] + byteoffset);
 616     *pw = v ? all_ones<T>::value : (T) 0;
 617   }
 618 
 619   // Set all bits in a region starting at pos to a value.
 620   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 621     assert(_map_size > 0, "not initialized");
 622     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 623     if (pos % 32 == 0 && num_bits == 32) {
 624       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 625     } else if (pos % 64 == 0 && num_bits == 64) {
 626       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 627     } else {
 628       for (unsigned n = 0; n < num_bits; n ++) {
 629         set_bit_at_position(pos + n, layer, v);
 630       }
 631     }
 632   }
 633 
 634   // Helper: sets all bits in a region [p, p+word_size).
 635   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 636     assert(word_size % _smallest_chunk_word_size == 0,
 637         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 638     const unsigned pos = get_bitpos_for_address(p);
 639     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 640     set_bits_of_region(pos, num_bits, layer, v);
 641   }
 642 
 643   // Helper: given an address, return the bit position representing that address.
 644   unsigned get_bitpos_for_address(const MetaWord* p) const {
 645     assert(_reference_address != NULL, "not initialized");
 646     assert(p >= _reference_address && p < _reference_address + _word_size,
 647            "Address %p out of range for occupancy map [%p..%p).",
 648             p, _reference_address, _reference_address + _word_size);
 649     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 650            "Address not aligned (%p).", p);
 651     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 652     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 653     return (unsigned) d;
 654   }
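       // Example: with _smallest_chunk_word_size == 128, the address
       // _reference_address + 384 maps to bit position 384 / 128 == 3.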
 655 
 656  public:
 657 
 658   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 659     _reference_address(reference_address), _word_size(word_size),
 660     _smallest_chunk_word_size(smallest_chunk_word_size) {
 661     assert(reference_address != NULL, "invalid reference address");
 662     assert(is_aligned(reference_address, smallest_chunk_word_size),
 663            "Reference address not aligned to smallest chunk size.");
 664     assert(is_aligned(word_size, smallest_chunk_word_size),
 665            "Word_size shall be a multiple of the smallest chunk size.");
 666     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 667     size_t num_bits = word_size / smallest_chunk_word_size;
 668     _map_size = (num_bits + 7) / 8;
 669     assert(_map_size * 8 >= num_bits, "sanity");
 670     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 671     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 672     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 673     memset(_map[1], 0, _map_size);
 674     memset(_map[0], 0, _map_size);
 675     // Sanity test: the first and last possible chunk start addresses in
 676     // the covered range shall map to the first and last bits in the bitmap.
 677     assert(get_bitpos_for_address(reference_address) == 0,
 678       "First chunk address in range must map to first bit in bitmap.");
 679     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 680       "Last chunk address in range must map to last bit in bitmap.");
 681   }
 682 
 683   ~OccupancyMap() {
 684     os::free(_map[0]);
 685     os::free(_map[1]);
 686   }
 687 
 688   // Returns true if a chunk starts at address p.
 689   bool chunk_starts_at_address(MetaWord* p) const {
 690     const unsigned pos = get_bitpos_for_address(p);
 691     return get_bit_at_position(pos, layer_chunk_start_map);
 692   }
 693 
 694   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 695     const unsigned pos = get_bitpos_for_address(p);
 696     set_bit_at_position(pos, layer_chunk_start_map, v);
 697   }
 698 
 699   // Removes all chunk-start-bits inside a region, typically as a
 700   // result of a chunk merge.
 701   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 702     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 703   }
 704 
 705   // Returns true if there are live (in use) chunks in the region limited
 706   // by [p, p+word_size).
 707   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 708     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 709   }
 710 
 711   // Marks the region starting at p with the size word_size as in use
 712   // or free, depending on v.
 713   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 714     set_bits_of_region(p, word_size, layer_in_use_map, v);
 715   }
 716 
 717 #ifdef ASSERT
 718   // Verify occupancy map for the address range [from, to).
 719   // We need to pass in the address range because the memory the
 720   // occupancy map covers may not be fully committed yet.
 721   void verify(MetaWord* from, MetaWord* to) {
 722     Metachunk* chunk = NULL;
 723     int nth_bit_for_chunk = 0;
 724     MetaWord* chunk_end = NULL;
 725     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 726       const unsigned pos = get_bitpos_for_address(p);
 727       // Check the chunk-starts-info:
 728       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 729         // Chunk start marked in bitmap.
 730         chunk = (Metachunk*) p;
 731         if (chunk_end != NULL) {
 732           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 733                  "the next chunk to start at %p).", p, chunk_end);
 734         }
 735         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 736         if (chunk->get_chunk_type() != HumongousIndex) {
 737           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 738         }
 739         chunk_end = p + chunk->word_size();
 740         nth_bit_for_chunk = 0;
 741         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 742       } else {
 743         // No chunk start marked in bitmap.
 744         assert(chunk != NULL, "Chunk should start at start of address range.");
 745         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 746         nth_bit_for_chunk ++;
 747       }
 748       // Check the in-use-info:
 749       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 750       if (in_use_bit) {
 751         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 752                chunk, nth_bit_for_chunk);
 753       } else {
 754         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 755                chunk, nth_bit_for_chunk);
 756       }
 757     }
 758   }
 759 
 760   // Verify that a given chunk is correctly accounted for in the bitmap.
 761   void verify_for_chunk(Metachunk* chunk) {
 762     assert(chunk_starts_at_address((MetaWord*) chunk),
 763            "No chunk start marked in map for chunk %p.", chunk);
 764     // For chunks larger than the minimal chunk size, no other chunk
 765     // must start in its area.
 766     if (chunk->word_size() > _smallest_chunk_word_size) {
 767       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 768                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 769              "No chunk must start within another chunk.");
 770     }
 771     if (!chunk->is_tagged_free()) {
 772       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 773              "Chunk %p is in use but marked as free in map (%d %d).",
 774              chunk, chunk->get_chunk_type(), chunk->get_origin());
 775     } else {
 776       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 777              "Chunk %p is free but marked as in-use in map (%d %d).",
 778              chunk, chunk->get_chunk_type(), chunk->get_origin());
 779     }
 780   }
 781 
 782 #endif // ASSERT
 783 
 784 };
 785 
 786 // A VirtualSpaceList node.
 787 class VirtualSpaceNode : public CHeapObj<mtClass> {
 788   friend class VirtualSpaceList;
 789 
 790   // Link to next VirtualSpaceNode
 791   VirtualSpaceNode* _next;
 792 
 793   // Whether this node is contained in class or metaspace.
 794   const bool _is_class;
 795 
 796   // The total memory backing this VirtualSpace
 797   MemRegion _reserved;
 798   ReservedSpace _rs;
 799   VirtualSpace _virtual_space;
 800   MetaWord* _top;
 801   // count of chunks contained in this VirtualSpace
 802   uintx _container_count;
 803 
 804   OccupancyMap* _occupancy_map;
 805 
 806   // Convenience functions to access the _virtual_space
 807   char* low()  const { return virtual_space()->low(); }
 808   char* high() const { return virtual_space()->high(); }
 809 
 810   // The first Metachunk will be allocated at the bottom of the
 811   // VirtualSpace
 812   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 813 
 814   // Committed but unused space in the virtual space
 815   size_t free_words_in_vs() const;
 816 
 817   // True if this node belongs to class metaspace.
 818   bool is_class() const { return _is_class; }
 819 
 820   // Helper function for take_from_committed: allocate padding chunks
 821   // until top is at the given address.
 822   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 823 
 824  public:
 825 
 826   VirtualSpaceNode(bool is_class, size_t byte_size);
 827   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 828     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 829   ~VirtualSpaceNode();
 830 
 831   // Convenience functions for logical bottom and end
 832   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 833   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 834 
 835   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 836   OccupancyMap* occupancy_map() { return _occupancy_map; }
 837 
 838   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 839 
 840   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 841   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 842 
 843   bool is_pre_committed() const { return _virtual_space.special(); }
 844 
 845   // Note: _top is the address of the next available space in _virtual_space.
 846   // Accessors
 847   VirtualSpaceNode* next() { return _next; }
 848   void set_next(VirtualSpaceNode* v) { _next = v; }
 849 
 850   void set_reserved(MemRegion const v) { _reserved = v; }
 851   void set_top(MetaWord* v) { _top = v; }
 852 
 853   // Accessors
 854   MemRegion* reserved() { return &_reserved; }
 855   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 856 
 857   // Returns true if "word_size" is available in the VirtualSpace
 858   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 859 
 860   MetaWord* top() const { return _top; }
 861   void inc_top(size_t word_size) { _top += word_size; }
 862 
 863   uintx container_count() { return _container_count; }
 864   void inc_container_count();
 865   void dec_container_count();
 866 #ifdef ASSERT
 867   uintx container_count_slow();
 868   void verify_container_count();
 869 #endif
 870 
 871   // used and capacity in this single entry in the list
 872   size_t used_words_in_vs() const;
 873   size_t capacity_words_in_vs() const;
 874 
 875   bool initialize();
 876 
 877   // get space from the virtual space
 878   Metachunk* take_from_committed(size_t chunk_word_size);
 879 
 880   // Allocate a chunk from the virtual space and return it.
 881   Metachunk* get_chunk_vs(size_t chunk_word_size);
 882 
 883   // Expands/shrinks the committed space in a virtual space.  Delegates
 884   // to VirtualSpace.
 885   bool expand_by(size_t min_words, size_t preferred_words);
 886 
 887   // In preparation for deleting this node, remove all the chunks
 888   // in the node from any freelist.
 889   void purge(ChunkManager* chunk_manager);
 890 
 891   // If an allocation doesn't fit in the current node, a new node is created.
 892   // Allocate chunks out of the remaining committed space in this node
 893   // to avoid wasting that memory.
 894   // This always works out evenly because all the chunk sizes are multiples of
 895   // the smallest chunk size.
 896   void retire(ChunkManager* chunk_manager);
 897 
 898 
 899   void print_on(outputStream* st) const                 { print_on(st, K); }
 900   void print_on(outputStream* st, size_t scale) const;
 901   void print_map(outputStream* st, bool is_class) const;
 902 
 903   // Debug support
 904   DEBUG_ONLY(void mangle();)
 905   // Verify counters, all chunks in this list node and the occupancy map.
 906   DEBUG_ONLY(void verify();)
 907   // Verify that all free chunks in this node are ideally merged
 908   // (there should not be multiple small chunks where a large chunk could exist).
 909   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 910 
 911 };
 912 
 913 #define assert_is_aligned(value, alignment)                  \
 914   assert(is_aligned((value), (alignment)),                   \
 915          SIZE_FORMAT_HEX " is not aligned to "               \
 916          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 917 
 918 #define assert_counter(expected_value, real_value, msg) \
 919   assert( (expected_value) == (real_value),             \
 920          "Counter mismatch (%s): expected " SIZE_FORMAT \
 921          ", but got: " SIZE_FORMAT ".", msg, expected_value, \
 922          real_value);
 923 
 924 // Decide if large pages should be committed when the memory is reserved.
 925 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 926   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 927     size_t words = bytes / BytesPerWord;
 928     bool is_class = false; // We never reserve large pages for the class space.
 929     if (MetaspaceGC::can_expand(words, is_class) &&
 930         MetaspaceGC::allowed_expansion() >= words) {
 931       return true;
 932     }
 933   }
 934 
 935   return false;
 936 }
 937 
 938 // 'bytes' is the size, in bytes, of the associated VirtualSpace.
 939 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 940   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 941   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 942   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 943   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 944 
 945   if (_rs.is_reserved()) {
 946     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 947     assert(_rs.size() != 0, "Catch if we get a 0 size");
 948     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 949     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 950 
 951     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 952   }
 953 }
 954 
 955 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 956   DEBUG_ONLY(this->verify();)
 957   Metachunk* chunk = first_chunk();
 958   Metachunk* invalid_chunk = (Metachunk*) top();
 959   while (chunk < invalid_chunk ) {
 960     assert(chunk->is_tagged_free(), "Should be tagged free");
 961     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 962     chunk_manager->remove_chunk(chunk);
 963     chunk->remove_sentinel();
 964     assert(chunk->next() == NULL &&
 965            chunk->prev() == NULL,
 966            "Was not removed from its list");
 967     chunk = (Metachunk*) next;
 968   }
 969 }
 970 
 971 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 972 
 973   if (bottom() == top()) {
 974     return;
 975   }
 976 
 977   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 978   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 979   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 980 
 981   int line_len = 100;
 982   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 983   line_len = (int)(section_len / spec_chunk_size);
 984 
 985   static const int NUM_LINES = 4;
 986 
 987   char* lines[NUM_LINES];
 988   for (int i = 0; i < NUM_LINES; i ++) {
 989     lines[i] = (char*)os::malloc(line_len, mtInternal);
 990   }
 991   int pos = 0;
 992   const MetaWord* p = bottom();
 993   const Metachunk* chunk = (const Metachunk*)p;
 994   const MetaWord* chunk_end = p + chunk->word_size();
 995   while (p < top()) {
 996     if (pos == line_len) {
 997       pos = 0;
 998       for (int i = 0; i < NUM_LINES; i ++) {
 999         st->fill_to(22);
1000         st->print_raw(lines[i], line_len);
1001         st->cr();
1002       }
1003     }
1004     if (pos == 0) {
1005       st->print(PTR_FORMAT ":", p2i(p));
1006     }
1007     if (p == chunk_end) {
1008       chunk = (Metachunk*)p;
1009       chunk_end = p + chunk->word_size();
1010     }
1011     // line 1: chunk starting points (a dot if that area is a chunk start).
1012     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
1013 
1014     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
1015     // chunk is in use.
1016     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
1017     if (chunk->word_size() == spec_chunk_size) {
1018       lines[1][pos] = chunk_is_free ? 'x' : 'X';
1019     } else if (chunk->word_size() == small_chunk_size) {
1020       lines[1][pos] = chunk_is_free ? 's' : 'S';
1021     } else if (chunk->word_size() == med_chunk_size) {
1022       lines[1][pos] = chunk_is_free ? 'm' : 'M';
1023     } else if (chunk->word_size() > med_chunk_size) {
1024       lines[1][pos] = chunk_is_free ? 'h' : 'H';
1025     } else {
1026       ShouldNotReachHere();
1027     }
1028 
1029     // Line 3: chunk origin
1030     const ChunkOrigin origin = chunk->get_origin();
1031     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
1032 
1033     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
1034     //         but were never used.
1035     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
1036 
1037     p += spec_chunk_size;
1038     pos ++;
1039   }
1040   if (pos > 0) {
1041     for (int i = 0; i < NUM_LINES; i ++) {
1042       st->fill_to(22);
1043       st->print_raw(lines[i], pos); // Only the first 'pos' characters of the last section were filled in.
1044       st->cr();
1045     }
1046   }
1047   for (int i = 0; i < NUM_LINES; i ++) {
1048     os::free(lines[i]);
1049   }
1050 }
1051 
1052 
1053 #ifdef ASSERT
1054 uintx VirtualSpaceNode::container_count_slow() {
1055   uintx count = 0;
1056   Metachunk* chunk = first_chunk();
1057   Metachunk* invalid_chunk = (Metachunk*) top();
1058   while (chunk < invalid_chunk ) {
1059     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1060     do_verify_chunk(chunk);
1061     // Don't count the chunks on the free lists.  Those are
1062     // still part of the VirtualSpaceNode but not currently
1063     // counted.
1064     if (!chunk->is_tagged_free()) {
1065       count++;
1066     }
1067     chunk = (Metachunk*) next;
1068   }
1069   return count;
1070 }
1071 #endif
1072 
1073 #ifdef ASSERT
1074 // Verify counters, all chunks in this list node and the occupancy map.
1075 void VirtualSpaceNode::verify() {
1076   uintx num_in_use_chunks = 0;
1077   Metachunk* chunk = first_chunk();
1078   Metachunk* invalid_chunk = (Metachunk*) top();
1079 
1080   // Iterate the chunks in this node and verify each chunk.
1081   while (chunk < invalid_chunk ) {
1082     DEBUG_ONLY(do_verify_chunk(chunk);)
1083     if (!chunk->is_tagged_free()) {
1084       num_in_use_chunks ++;
1085     }
1086     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1087     chunk = (Metachunk*) next;
1088   }
1089   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1090          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1091   // Also verify the occupancy map.
1092   occupancy_map()->verify(this->bottom(), this->top());
1093 }
1094 #endif // ASSERT
1095 
1096 #ifdef ASSERT
1097 // Verify that all free chunks in this node are ideally merged
1098 // (there should not be multiple small chunks where a large chunk could exist).
1099 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1100   Metachunk* chunk = first_chunk();
1101   Metachunk* invalid_chunk = (Metachunk*) top();
1102   // Shorthands.
1103   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1104   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1105   int num_free_chunks_since_last_med_boundary = -1;
1106   int num_free_chunks_since_last_small_boundary = -1;
1107   while (chunk < invalid_chunk ) {
1108     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1109     // Reset the counter when encountering a non-free chunk.
1110     if (chunk->get_chunk_type() != HumongousIndex) {
1111       if (chunk->is_tagged_free()) {
1112         // Count successive free, non-humongous chunks.
1113         if (is_aligned(chunk, size_small)) {
1114           assert(num_free_chunks_since_last_small_boundary <= 1,
1115                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1116           num_free_chunks_since_last_small_boundary = 0;
1117         } else if (num_free_chunks_since_last_small_boundary != -1) {
1118           num_free_chunks_since_last_small_boundary ++;
1119         }
1120         if (is_aligned(chunk, size_med)) {
1121           assert(num_free_chunks_since_last_med_boundary <= 1,
1122                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1123           num_free_chunks_since_last_med_boundary = 0;
1124         } else if (num_free_chunks_since_last_med_boundary != -1) {
1125           num_free_chunks_since_last_med_boundary ++;
1126         }
1127       } else {
1128         // Encountering a non-free chunk, reset counters.
1129         num_free_chunks_since_last_med_boundary = -1;
1130         num_free_chunks_since_last_small_boundary = -1;
1131       }
1132     } else {
1133       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1134       num_free_chunks_since_last_med_boundary = -1;
1135       num_free_chunks_since_last_small_boundary = -1;
1136     }
1137 
1138     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1139     chunk = (Metachunk*) next;
1140   }
1141 }
1142 #endif // ASSERT
1143 
1144 // List of VirtualSpaces for metadata allocation.
1145 class VirtualSpaceList : public CHeapObj<mtClass> {
1146   friend class VirtualSpaceNode;
1147 
1148   enum VirtualSpaceSizes {
1149     VirtualSpaceSize = 256 * K
1150   };
1151 
1152   // Head of the list
1153   VirtualSpaceNode* _virtual_space_list;
1154   // virtual space currently being used for allocations
1155   VirtualSpaceNode* _current_virtual_space;
1156 
1157   // Is this VirtualSpaceList used for the compressed class space
1158   bool _is_class;
1159 
1160   // Sum of reserved and committed memory in the virtual spaces
1161   size_t _reserved_words;
1162   size_t _committed_words;
1163 
1164   // Number of virtual spaces
1165   size_t _virtual_space_count;
1166 
1167   ~VirtualSpaceList();
1168 
1169   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1170 
1171   void set_virtual_space_list(VirtualSpaceNode* v) {
1172     _virtual_space_list = v;
1173   }
1174   void set_current_virtual_space(VirtualSpaceNode* v) {
1175     _current_virtual_space = v;
1176   }
1177 
1178   void link_vs(VirtualSpaceNode* new_entry);
1179 
1180   // Get another virtual space and add it to the list.  This
1181   // is typically prompted by a failed attempt to allocate a chunk
1182   // and is typically followed by the allocation of a chunk.
1183   bool create_new_virtual_space(size_t vs_word_size);
1184 
1185   // Chunk up the unused committed space in the current
1186   // virtual space and add the chunks to the free list.
1187   void retire_current_virtual_space();
1188 
1189  public:
1190   VirtualSpaceList(size_t word_size);
1191   VirtualSpaceList(ReservedSpace rs);
1192 
1193   size_t free_bytes();
1194 
1195   Metachunk* get_new_chunk(size_t chunk_word_size,
1196                            size_t suggested_commit_granularity);
1197 
1198   bool expand_node_by(VirtualSpaceNode* node,
1199                       size_t min_words,
1200                       size_t preferred_words);
1201 
1202   bool expand_by(size_t min_words,
1203                  size_t preferred_words);
1204 
1205   VirtualSpaceNode* current_virtual_space() {
1206     return _current_virtual_space;
1207   }
1208 
1209   bool is_class() const { return _is_class; }
1210 
1211   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1212 
1213   size_t reserved_words()  { return _reserved_words; }
1214   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1215   size_t committed_words() { return _committed_words; }
1216   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1217 
1218   void inc_reserved_words(size_t v);
1219   void dec_reserved_words(size_t v);
1220   void inc_committed_words(size_t v);
1221   void dec_committed_words(size_t v);
1222   void inc_virtual_space_count();
1223   void dec_virtual_space_count();
1224 
1225   bool contains(const void* ptr);
1226 
1227   // Unlink empty VirtualSpaceNodes and free them.
1228   void purge(ChunkManager* chunk_manager);
1229 
1230   void print_on(outputStream* st) const                 { print_on(st, K); }
1231   void print_on(outputStream* st, size_t scale) const;
1232   void print_map(outputStream* st) const;
1233 
1234   class VirtualSpaceListIterator : public StackObj {
1235     VirtualSpaceNode* _virtual_spaces;
1236    public:
1237     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1238       _virtual_spaces(virtual_spaces) {}
1239 
1240     bool repeat() {
1241       return _virtual_spaces != NULL;
1242     }
1243 
1244     VirtualSpaceNode* get_next() {
1245       VirtualSpaceNode* result = _virtual_spaces;
1246       if (_virtual_spaces != NULL) {
1247         _virtual_spaces = _virtual_spaces->next();
1248       }
1249       return result;
1250     }
1251   };
1252 };
1253 
1254 class Metadebug : AllStatic {
1255   // Debugging support for Metaspaces
1256   static int _allocation_fail_alot_count;
1257 
1258  public:
1259 
1260   static void init_allocation_fail_alot_count();
1261 #ifdef ASSERT
1262   static bool test_metadata_failure();
1263 #endif
1264 };
1265 
1266 int Metadebug::_allocation_fail_alot_count = 0;
1267 
1268 
1269 //  SpaceManager - used by Metaspace to handle allocations
1270 class SpaceManager : public CHeapObj<mtClass> {
1271   friend class ClassLoaderMetaspace;
1272   friend class Metadebug;
1273 
1274  private:
1275 
1276   // protects allocations
1277   Mutex* const _lock;
1278 
1279   // Type of metadata allocated.
1280   const Metaspace::MetadataType   _mdtype;
1281 
1282   // Type of metaspace
1283   const Metaspace::MetaspaceType  _space_type;
1284 
1285   // List of chunks in use by this SpaceManager.  Allocations
1286   // are done from the current chunk.  The list is used for deallocating
1287   // chunks when the SpaceManager is freed.
1288   Metachunk* _chunks_in_use[NumberOfInUseLists];
1289   Metachunk* _current_chunk;
1290 
1291   // Maximum number of small chunks to allocate to a SpaceManager
1292   static uint const _small_chunk_limit;
1293 
1294   // Maximum number of specialized chunks to allocate for anonymous and delegating
1295   // metadata space to a SpaceManager
1296   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1297 
1298   // Some running counters, but let's keep their number small so as not to add too much to
1299   // the per-classloader footprint.
1300   // Note: capacity = used + free + waste + overhead. We do not keep running counters for
1301   // free and waste. Their sum can be deduced from the three other values.
1302   size_t _overhead_words;
1303   size_t _capacity_words;
1304   size_t _used_words;
1305 
1306   // Free lists of blocks are per SpaceManager since they
1307   // are assumed to be in chunks in use by the SpaceManager
1308   // and all chunks in use by a SpaceManager are freed when
1309   // the class loader using the SpaceManager is collected.
1310   BlockFreelist* _block_freelists;
1311 
1312  private:
1313   // Accessors
1314   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1315   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1316     _chunks_in_use[index] = v;
1317   }
1318 
1319   BlockFreelist* block_freelists() const { return _block_freelists; }
1320 
1321   Metaspace::MetadataType mdtype() { return _mdtype; }
1322 
1323   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1324   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1325 
1326   Metachunk* current_chunk() const { return _current_chunk; }
1327   void set_current_chunk(Metachunk* v) {
1328     _current_chunk = v;
1329   }
1330 
1331   Metachunk* find_current_chunk(size_t word_size);
1332 
1333   // Add chunk to the list of chunks in use
1334   void add_chunk(Metachunk* v, bool make_current);
1335   void retire_current_chunk();
1336 
1337   Mutex* lock() const { return _lock; }
1338 
1339   // Adds to the given statistic object. Expects to be locked with lock().
1340   void add_to_statistics_locked(SpaceManagerStatistics* out) const;
1341 
1342   // Verify internal counters against the current state. Expects to be locked with lock().
1343   DEBUG_ONLY(void verify_metrics_locked() const;)
1344 
1345  protected:
1346   void initialize();
1347 
1348  public:
1349   SpaceManager(Metaspace::MetadataType mdtype,
1350                Metaspace::MetaspaceType space_type,
1351                Mutex* lock);
1352   ~SpaceManager();
1353 
1354   enum ChunkMultiples {
1355     MediumChunkMultiple = 4
1356   };
1357 
1358   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1359   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1360   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1361 
1362   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1363 
1364   // Accessors
1365   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1366 
1367   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1368   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1369   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1370 
1371   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1372 
1373   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1374 
1375   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1376 
1377   size_t capacity_words() const     { return _capacity_words; }
1378   size_t used_words() const         { return _used_words; }
1379   size_t overhead_words() const     { return _overhead_words; }
1380 
1381   // Adjust local, global counters after a new chunk has been added.
1382   void account_for_new_chunk(const Metachunk* new_chunk);
1383 
1384   // Adjust local, global counters after space has been allocated from the current chunk.
1385   void account_for_allocation(size_t words);
1386 
1387   // Adjust global counters just before the SpaceManager dies, after all its chunks
1388   // have been returned to the freelist.
1389   void account_for_spacemanager_death();
1390 
1391   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1392   // or return the unadjusted size if the requested size is humongous.
1393   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1394   size_t adjust_initial_chunk_size(size_t requested) const;
1395 
  // Get the initial chunk size for this metaspace type.
1397   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1398 
1399   // Todo: remove this once we have counters by chunk type.
1400   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1401 
1402   Metachunk* get_new_chunk(size_t chunk_word_size);
1403 
1404   // Block allocation and deallocation.
1405   // Allocates a block from the current chunk
1406   MetaWord* allocate(size_t word_size);
1407 
1408   // Helper for allocations
1409   MetaWord* allocate_work(size_t word_size);
1410 
1411   // Returns a block to the per manager freelist
1412   void deallocate(MetaWord* p, size_t word_size);
1413 
  // Based on the allocation size and a minimum chunk size,
  // calculate the chunk size to use (for expanding space for chunk allocation).
1416   size_t calc_chunk_size(size_t allocation_word_size);
1417 
1418   // Called when an allocation from the current chunk fails.
1419   // Gets a new chunk (may require getting a new virtual space),
1420   // and allocates from that chunk.
1421   MetaWord* grow_and_allocate(size_t word_size);
1422 
1423   // Notify memory usage to MemoryService.
1424   void track_metaspace_memory_usage();
1425 
1426   // debugging support.
1427 
1428   void print_on(outputStream* st) const;
1429   void locked_print_chunks_in_use_on(outputStream* st) const;
1430 
1431   void verify();
1432   void verify_chunk_size(Metachunk* chunk);
1433 
  // This adjusts the given size to be at least the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
1436   size_t get_allocation_word_size(size_t word_size) {
1437     size_t byte_size = word_size * BytesPerWord;
1438 
1439     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1440     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1441 
1442     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1443     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1444 
1445     return raw_word_size;
1446   }
1447 
1448   // Adds to the given statistic object.
1449   void add_to_statistics(SpaceManagerStatistics* out) const;
1450 
1451   // Verify internal counters against the current state.
1452   DEBUG_ONLY(void verify_metrics() const;)
1453 
1454 };
1455 
1456 uint const SpaceManager::_small_chunk_limit = 4;
1457 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1458 
1459 void VirtualSpaceNode::inc_container_count() {
1460   assert_lock_strong(MetaspaceExpand_lock);
1461   _container_count++;
1462 }
1463 
1464 void VirtualSpaceNode::dec_container_count() {
1465   assert_lock_strong(MetaspaceExpand_lock);
1466   _container_count--;
1467 }
1468 
1469 #ifdef ASSERT
1470 void VirtualSpaceNode::verify_container_count() {
1471   assert(_container_count == container_count_slow(),
1472          "Inconsistency in container_count _container_count " UINTX_FORMAT
1473          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1474 }
1475 #endif
1476 
1477 // BlockFreelist methods
1478 
1479 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1480 
1481 BlockFreelist::~BlockFreelist() {
1482   delete _dictionary;
1483   if (_small_blocks != NULL) {
1484     delete _small_blocks;
1485   }
1486 }
1487 
1488 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1489   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1490 
1491   Metablock* free_chunk = ::new (p) Metablock(word_size);
1492   if (word_size < SmallBlocks::small_block_max_size()) {
1493     small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
1497   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1498             SIZE_FORMAT, p2i(free_chunk), word_size);
1499 }
1500 
1501 MetaWord* BlockFreelist::get_block(size_t word_size) {
1502   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1503 
1504   // Try small_blocks first.
1505   if (word_size < SmallBlocks::small_block_max_size()) {
1506     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1507     // this space manager.
1508     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1509     if (new_block != NULL) {
1510       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1511               p2i(new_block), word_size);
1512       return new_block;
1513     }
1514   }
1515 
1516   if (word_size < BlockFreelist::min_dictionary_size()) {
1517     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1518     return NULL;
1519   }
1520 
1521   Metablock* free_block = dictionary()->get_chunk(word_size);
1522   if (free_block == NULL) {
1523     return NULL;
1524   }
1525 
1526   const size_t block_size = free_block->size();
1527   if (block_size > WasteMultiplier * word_size) {
1528     return_block((MetaWord*)free_block, block_size);
1529     return NULL;
1530   }
1531 
1532   MetaWord* new_block = (MetaWord*)free_block;
1533   assert(block_size >= word_size, "Incorrect size of block from freelist");
1534   const size_t unused = block_size - word_size;
1535   if (unused >= SmallBlocks::small_block_min_size()) {
1536     return_block(new_block + word_size, unused);
1537   }
1538 
1539   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1540             p2i(new_block), word_size);
1541   return new_block;
1542 }
1543 
1544 void BlockFreelist::print_on(outputStream* st) const {
1545   dictionary()->print_free_lists(st);
1546   if (_small_blocks != NULL) {
1547     _small_blocks->print_on(st);
1548   }
1549 }
1550 
1551 // VirtualSpaceNode methods
1552 
1553 VirtualSpaceNode::~VirtualSpaceNode() {
1554   _rs.release();
1555   if (_occupancy_map != NULL) {
1556     delete _occupancy_map;
1557   }
1558 #ifdef ASSERT
1559   size_t word_size = sizeof(*this) / BytesPerWord;
1560   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1561 #endif
1562 }
1563 
1564 size_t VirtualSpaceNode::used_words_in_vs() const {
1565   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1566 }
1567 
1568 // Space committed in the VirtualSpace
1569 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1570   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1571 }
1572 
1573 size_t VirtualSpaceNode::free_words_in_vs() const {
1574   return pointer_delta(end(), top(), sizeof(MetaWord));
1575 }
1576 
1577 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1578 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1579 
1580   assert(target_top > top(), "Sanity");
1581 
1582   // Padding chunks are added to the freelist.
1583   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1584 
1585   // shorthands
1586   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1587   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1588   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1589 
1590   while (top() < target_top) {
1591 
1592     // We could make this coding more generic, but right now we only deal with two possible chunk sizes
1593     // for padding chunks, so it is not worth it.
1594     size_t padding_chunk_word_size = small_word_size;
1595     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1596       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1597       padding_chunk_word_size = spec_word_size;
1598     }
1599     MetaWord* here = top();
1600     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1601     inc_top(padding_chunk_word_size);
1602 
1603     // Create new padding chunk.
1604     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1605     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1606 
1607     Metachunk* const padding_chunk =
1608       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1609     assert(padding_chunk == (Metachunk*)here, "Sanity");
1610     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1611     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1612                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1613                                        (is_class() ? "class space " : "metaspace"),
1614                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1615 
1616     // Mark chunk start in occupancy map.
1617     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1618 
    // Chunks are born as in-use (see Metachunk ctor). So, before returning
1620     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1621     // will assert that).
1622     do_update_in_use_info_for_chunk(padding_chunk, true);
1623 
1624     // Return Chunk to freelist.
1625     inc_container_count();
1626     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
    // Please note: ChunkManager::return_single_chunk() may already have merged
    // the padding chunk with neighboring chunks, so it may have vanished.
    // Do not reference the padding chunk beyond this point.
1631   }
1632 
1633   assert(top() == target_top, "Sanity");
1634 
1635 } // allocate_padding_chunks_until_top_is_at()
1636 
1637 // Allocates the chunk from the virtual space only.
1638 // This interface is also used internally for debugging.  Not all
1639 // chunks removed here are necessarily used for allocation.
1640 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1641   // Non-humongous chunks are to be allocated aligned to their chunk
1642   // size. So, start addresses of medium chunks are aligned to medium
1643   // chunk size, those of small chunks to small chunk size and so
1644   // forth. This facilitates merging of free chunks and reduces
1645   // fragmentation. Chunk sizes are spec < small < medium, with each
1646   // larger chunk size being a multiple of the next smaller chunk
1647   // size.
  // Because of this alignment, we may need to create a number of padding
1649   // chunks. These chunks are created and added to the freelist.
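  //
  // Illustrative example (chunk sizes are for illustration only; the real
  // values are the (Class)SpecializedChunk/SmallChunk/MediumChunk constants):
  // with specialized = 128 and small = 512 words, a request for a small chunk
  // while top() sits 128 words past a small-chunk boundary first creates
  // three 128-word padding chunks (returned to the freelist) so that the new
  // small chunk starts on a 512-word boundary.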
1650 
1651   // The chunk manager to which we will give our padding chunks.
1652   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1653 
1654   // shorthands
1655   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1656   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1657   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1658 
1659   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1660          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1661 
1662   // Chunk alignment (in bytes) == chunk size unless humongous.
1663   // Humongous chunks are aligned to the smallest chunk size (spec).
1664   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1665                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1666 
1667   // Do we have enough space to create the requested chunk plus
1668   // any padding chunks needed?
1669   MetaWord* const next_aligned =
1670     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1671   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1672     return NULL;
1673   }
1674 
1675   // Before allocating the requested chunk, allocate padding chunks if necessary.
1676   // We only need to do this for small or medium chunks: specialized chunks are the
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1678   // (implicitly, also aligned to smallest chunk size).
1679   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1680     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1681         (is_class() ? "class space " : "metaspace"),
1682         top(), next_aligned);
1683     allocate_padding_chunks_until_top_is_at(next_aligned);
1684     // Now, top should be aligned correctly.
1685     assert_is_aligned(top(), required_chunk_alignment);
1686   }
1687 
1688   // Now, top should be aligned correctly.
1689   assert_is_aligned(top(), required_chunk_alignment);
1690 
1691   // Bottom of the new chunk
1692   MetaWord* chunk_limit = top();
1693   assert(chunk_limit != NULL, "Not safe to call this method");
1694 
1695   // The virtual spaces are always expanded by the
1696   // commit granularity to enforce the following condition.
1697   // Without this the is_available check will not work correctly.
1698   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1699       "The committed memory doesn't match the expanded memory.");
1700 
1701   if (!is_available(chunk_word_size)) {
1702     LogTarget(Debug, gc, metaspace, freelist) lt;
1703     if (lt.is_enabled()) {
1704       LogStream ls(lt);
1705       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1706       // Dump some information about the virtual space that is nearly full
1707       print_on(&ls);
1708       ls.cr(); // ~LogStream does not autoflush.
1709     }
1710     return NULL;
1711   }
1712 
1713   // Take the space  (bump top on the current virtual space).
1714   inc_top(chunk_word_size);
1715 
1716   // Initialize the chunk
1717   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1718   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1719   assert(result == (Metachunk*)chunk_limit, "Sanity");
1720   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1721   do_update_in_use_info_for_chunk(result, true);
1722 
1723   inc_container_count();
1724 
1725   if (VerifyMetaspace) {
1726     DEBUG_ONLY(chunk_manager->locked_verify());
1727     DEBUG_ONLY(this->verify());
1728   }
1729 
1730   DEBUG_ONLY(do_verify_chunk(result));
1731 
1732   result->inc_use_count();
1733 
1734   return result;
1735 }
1736 
1737 
1738 // Expand the virtual space (commit more of the reserved space)
1739 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1740   size_t min_bytes = min_words * BytesPerWord;
1741   size_t preferred_bytes = preferred_words * BytesPerWord;
1742 
1743   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1744 
1745   if (uncommitted < min_bytes) {
1746     return false;
1747   }
1748 
1749   size_t commit = MIN2(preferred_bytes, uncommitted);
1750   bool result = virtual_space()->expand_by(commit, false);
1751 
1752   if (result) {
1753     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1754               (is_class() ? "class" : "non-class"), commit);
1755     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
1756   } else {
1757     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1758               (is_class() ? "class" : "non-class"), commit);
1759   }
1760 
1761   assert(result, "Failed to commit memory");
1762 
1763   return result;
1764 }
1765 
1766 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1767   assert_lock_strong(MetaspaceExpand_lock);
1768   Metachunk* result = take_from_committed(chunk_word_size);
1769   return result;
1770 }
1771 
1772 bool VirtualSpaceNode::initialize() {
1773 
1774   if (!_rs.is_reserved()) {
1775     return false;
1776   }
1777 
  // These are necessary restrictions to make sure that the virtual space always
1779   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1780   // aligned only the middle alignment of the VirtualSpace is used.
1781   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1782   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1783 
1784   // ReservedSpaces marked as special will have the entire memory
1785   // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
1787   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1788 
1789   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1790                                             Metaspace::commit_alignment());
1791   if (result) {
1792     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1793         "Checking that the pre-committed memory was registered by the VirtualSpace");
1794 
1795     set_top((MetaWord*)virtual_space()->low());
1796     set_reserved(MemRegion((HeapWord*)_rs.base(),
1797                  (HeapWord*)(_rs.base() + _rs.size())));
1798 
1799     assert(reserved()->start() == (HeapWord*) _rs.base(),
1800            "Reserved start was not set properly " PTR_FORMAT
1801            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1802     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1803            "Reserved size was not set properly " SIZE_FORMAT
1804            " != " SIZE_FORMAT, reserved()->word_size(),
1805            _rs.size() / BytesPerWord);
1806   }
1807 
1808   // Initialize Occupancy Map.
1809   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1810   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1811 
1812   return result;
1813 }
1814 
1815 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
1816   size_t used_words = used_words_in_vs();
1817   size_t commit_words = committed_words();
1818   size_t res_words = reserved_words();
1819   VirtualSpace* vs = virtual_space();
1820 
1821   st->print("node @" PTR_FORMAT ": ", p2i(this));
1822   st->print("reserved=");
1823   print_scaled_words(st, res_words, scale);
1824   st->print(", committed=");
1825   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
1826   st->print(", used=");
1827   print_scaled_words_and_percentage(st, used_words, res_words, scale);
1828   st->cr();
1829   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
1830            PTR_FORMAT ", " PTR_FORMAT ")",
1831            p2i(bottom()), p2i(top()), p2i(end()),
1832            p2i(vs->high_boundary()));
1833 }
1834 
1835 #ifdef ASSERT
1836 void VirtualSpaceNode::mangle() {
1837   size_t word_size = capacity_words_in_vs();
1838   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1839 }
1840 #endif // ASSERT
1841 
1842 // VirtualSpaceList methods
1843 // Space allocated from the VirtualSpace
1844 
1845 VirtualSpaceList::~VirtualSpaceList() {
1846   VirtualSpaceListIterator iter(virtual_space_list());
1847   while (iter.repeat()) {
1848     VirtualSpaceNode* vsl = iter.get_next();
1849     delete vsl;
1850   }
1851 }
1852 
1853 void VirtualSpaceList::inc_reserved_words(size_t v) {
1854   assert_lock_strong(MetaspaceExpand_lock);
1855   _reserved_words = _reserved_words + v;
1856 }
1857 void VirtualSpaceList::dec_reserved_words(size_t v) {
1858   assert_lock_strong(MetaspaceExpand_lock);
1859   _reserved_words = _reserved_words - v;
1860 }
1861 
1862 #define assert_committed_below_limit()                        \
1863   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1864          "Too much committed memory. Committed: " SIZE_FORMAT \
1865          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1866           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1867 
1868 void VirtualSpaceList::inc_committed_words(size_t v) {
1869   assert_lock_strong(MetaspaceExpand_lock);
1870   _committed_words = _committed_words + v;
1871 
1872   assert_committed_below_limit();
1873 }
1874 void VirtualSpaceList::dec_committed_words(size_t v) {
1875   assert_lock_strong(MetaspaceExpand_lock);
1876   _committed_words = _committed_words - v;
1877 
1878   assert_committed_below_limit();
1879 }
1880 
1881 void VirtualSpaceList::inc_virtual_space_count() {
1882   assert_lock_strong(MetaspaceExpand_lock);
1883   _virtual_space_count++;
1884 }
1885 void VirtualSpaceList::dec_virtual_space_count() {
1886   assert_lock_strong(MetaspaceExpand_lock);
1887   _virtual_space_count--;
1888 }
1889 
1890 void ChunkManager::remove_chunk(Metachunk* chunk) {
1891   size_t word_size = chunk->word_size();
1892   ChunkIndex index = list_index(word_size);
1893   if (index != HumongousIndex) {
1894     free_chunks(index)->remove_chunk(chunk);
1895   } else {
1896     humongous_dictionary()->remove_chunk(chunk);
1897   }
1898 
1899   // Chunk has been removed from the chunks free list, update counters.
1900   account_for_removed_chunk(chunk);
1901 }
1902 
1903 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1904   assert_lock_strong(MetaspaceExpand_lock);
1905   assert(chunk != NULL, "invalid chunk pointer");
1906   // Check for valid merge combinations.
1907   assert((chunk->get_chunk_type() == SpecializedIndex &&
1908           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1909          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1910         "Invalid chunk merge combination.");
1911 
1912   const size_t target_chunk_word_size =
1913     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1914 
1915   // [ prospective merge region )
1916   MetaWord* const p_merge_region_start =
1917     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1918   MetaWord* const p_merge_region_end =
1919     p_merge_region_start + target_chunk_word_size;
1920 
1921   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1922   VirtualSpaceNode* const vsn = chunk->container();
1923   OccupancyMap* const ocmap = vsn->occupancy_map();
1924 
1925   // The prospective chunk merge range must be completely contained by the
1926   // committed range of the virtual space node.
1927   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1928     return false;
1929   }
1930 
1931   // Only attempt to merge this range if at its start a chunk starts and at its end
1932   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1933   // of that range, we cannot merge.
1934   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1935     return false;
1936   }
1937   if (p_merge_region_end < vsn->top() &&
1938       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1939     return false;
1940   }
1941 
1942   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1943   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1944     return false;
1945   }
1946 
1947   // Success! Remove all chunks in this region...
1948   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1949     (is_class() ? "class space" : "metaspace"),
1950     p_merge_region_start, p_merge_region_end);
1951 
1952   const int num_chunks_removed =
1953     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1954 
1955   // ... and create a single new bigger chunk.
1956   Metachunk* const p_new_chunk =
1957       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1958   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1959   p_new_chunk->set_origin(origin_merge);
1960 
1961   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1962     (is_class() ? "class space" : "metaspace"),
1963     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1964 
1965   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1966   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1967   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1968 
1969   // Mark chunk as free. Note: it is not necessary to update the occupancy
1970   // map in-use map, because the old chunks were also free, so nothing
1971   // should have changed.
1972   p_new_chunk->set_is_tagged_free(true);
1973 
1974   // Add new chunk to its freelist.
1975   ChunkList* const list = free_chunks(target_chunk_type);
1976   list->return_chunk_at_head(p_new_chunk);
1977 
1978   // And adjust ChunkManager:: _free_chunks_count (_free_chunks_total
1979   // should not have changed, because the size of the space should be the same)
1980   _free_chunks_count -= num_chunks_removed;
1981   _free_chunks_count ++;
1982 
1983   // VirtualSpaceNode::container_count does not have to be modified:
1984   // it means "number of active (non-free) chunks", so merging free chunks
1985   // should not affect that count.
1986 
1987   // At the end of a chunk merge, run verification tests.
1988   if (VerifyMetaspace) {
1989     DEBUG_ONLY(this->locked_verify());
1990     DEBUG_ONLY(vsn->verify());
1991   }
1992 
1993   return true;
1994 }
1995 
1996 // Remove all chunks in the given area - the chunks are supposed to be free -
1997 // from their corresponding freelists. Mark them as invalid.
1998 // - This does not correct the occupancy map.
1999 // - This does not adjust the counters in ChunkManager.
// - This does not adjust the container count in the containing VirtualSpaceNode.
2001 // Returns number of chunks removed.
2002 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
2003   assert(p != NULL && word_size > 0, "Invalid range.");
2004   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
2005   assert_is_aligned(word_size, smallest_chunk_size);
2006 
2007   Metachunk* const start = (Metachunk*) p;
2008   const Metachunk* const end = (Metachunk*)(p + word_size);
2009   Metachunk* cur = start;
2010   int num_removed = 0;
2011   while (cur < end) {
2012     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
2013     DEBUG_ONLY(do_verify_chunk(cur));
2014     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
2015     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
2016     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
2017       (is_class() ? "class space" : "metaspace"),
2018       cur, cur->word_size() * sizeof(MetaWord));
2019     cur->remove_sentinel();
2020     // Note: cannot call ChunkManager::remove_chunk, because that
2021     // modifies the counters in ChunkManager, which we do not want. So
2022     // we call remove_chunk on the freelist directly (see also the
2023     // splitting function which does the same).
2024     ChunkList* const list = free_chunks(list_index(cur->word_size()));
2025     list->remove_chunk(cur);
2026     num_removed ++;
2027     cur = next;
2028   }
2029   return num_removed;
2030 }
2031 
2032 // Walk the list of VirtualSpaceNodes and delete
2033 // nodes with a 0 container_count.  Remove Metachunks in
2034 // the node from their respective freelists.
2035 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2036   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2037   assert_lock_strong(MetaspaceExpand_lock);
2038   // Don't use a VirtualSpaceListIterator because this
2039   // list is being changed and a straightforward use of an iterator is not safe.
2040   VirtualSpaceNode* purged_vsl = NULL;
2041   VirtualSpaceNode* prev_vsl = virtual_space_list();
2042   VirtualSpaceNode* next_vsl = prev_vsl;
2043   while (next_vsl != NULL) {
2044     VirtualSpaceNode* vsl = next_vsl;
2045     DEBUG_ONLY(vsl->verify_container_count();)
2046     next_vsl = vsl->next();
2047     // Don't free the current virtual space since it will likely
2048     // be needed soon.
2049     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2050       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2051                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2052       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
2053       // Unlink it from the list
2054       if (prev_vsl == vsl) {
2055         // This is the case of the current node being the first node.
2056         assert(vsl == virtual_space_list(), "Expected to be the first node");
2057         set_virtual_space_list(vsl->next());
2058       } else {
2059         prev_vsl->set_next(vsl->next());
2060       }
2061 
2062       vsl->purge(chunk_manager);
2063       dec_reserved_words(vsl->reserved_words());
2064       dec_committed_words(vsl->committed_words());
2065       dec_virtual_space_count();
2066       purged_vsl = vsl;
2067       delete vsl;
2068     } else {
2069       prev_vsl = vsl;
2070     }
2071   }
2072 #ifdef ASSERT
2073   if (purged_vsl != NULL) {
2074     // List should be stable enough to use an iterator here.
2075     VirtualSpaceListIterator iter(virtual_space_list());
2076     while (iter.repeat()) {
2077       VirtualSpaceNode* vsl = iter.get_next();
2078       assert(vsl != purged_vsl, "Purge of vsl failed");
2079     }
2080   }
2081 #endif
2082 }
2083 
2084 
2085 // This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
2087 // unloading time during a safepoint.
2088 bool VirtualSpaceList::contains(const void* ptr) {
2089   // List should be stable enough to use an iterator here because removing virtual
2090   // space nodes is only allowed at a safepoint.
2091   VirtualSpaceListIterator iter(virtual_space_list());
2092   while (iter.repeat()) {
2093     VirtualSpaceNode* vsn = iter.get_next();
2094     if (vsn->contains(ptr)) {
2095       return true;
2096     }
2097   }
2098   return false;
2099 }
2100 
2101 void VirtualSpaceList::retire_current_virtual_space() {
2102   assert_lock_strong(MetaspaceExpand_lock);
2103 
2104   VirtualSpaceNode* vsn = current_virtual_space();
2105 
2106   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2107                                   Metaspace::chunk_manager_metadata();
2108 
2109   vsn->retire(cm);
2110 }
2111 
2112 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2113   DEBUG_ONLY(verify_container_count();)
2114   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2115   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2116     ChunkIndex index = (ChunkIndex)i;
2117     size_t chunk_size = chunk_manager->size_by_index(index);
2118 
2119     while (free_words_in_vs() >= chunk_size) {
2120       Metachunk* chunk = get_chunk_vs(chunk_size);
2121       // Chunk will be allocated aligned, so allocation may require
2122       // additional padding chunks. That may cause above allocation to
2123       // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
2125       // size should be a multiple of the smallest chunk size, we
2126       // should always be able to fill the VirtualSpace completely.
2127       if (chunk == NULL) {
2128         break;
2129       }
2130       chunk_manager->return_single_chunk(index, chunk);
2131     }
2132     DEBUG_ONLY(verify_container_count();)
2133   }
2134   assert(free_words_in_vs() == 0, "should be empty now");
2135 }
2136 
2137 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2138                                    _is_class(false),
2139                                    _virtual_space_list(NULL),
2140                                    _current_virtual_space(NULL),
2141                                    _reserved_words(0),
2142                                    _committed_words(0),
2143                                    _virtual_space_count(0) {
2144   MutexLockerEx cl(MetaspaceExpand_lock,
2145                    Mutex::_no_safepoint_check_flag);
2146   create_new_virtual_space(word_size);
2147 }
2148 
2149 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2150                                    _is_class(true),
2151                                    _virtual_space_list(NULL),
2152                                    _current_virtual_space(NULL),
2153                                    _reserved_words(0),
2154                                    _committed_words(0),
2155                                    _virtual_space_count(0) {
2156   MutexLockerEx cl(MetaspaceExpand_lock,
2157                    Mutex::_no_safepoint_check_flag);
2158   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2159   bool succeeded = class_entry->initialize();
2160   if (succeeded) {
2161     link_vs(class_entry);
2162   }
2163 }
2164 
2165 size_t VirtualSpaceList::free_bytes() {
2166   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2167 }
2168 
2169 // Allocate another meta virtual space and add it to the list.
2170 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2171   assert_lock_strong(MetaspaceExpand_lock);
2172 
2173   if (is_class()) {
2174     assert(false, "We currently don't support more than one VirtualSpace for"
2175                   " the compressed class space. The initialization of the"
2176                   " CCS uses another code path and should not hit this path.");
2177     return false;
2178   }
2179 
2180   if (vs_word_size == 0) {
2181     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2182     return false;
2183   }
2184 
2185   // Reserve the space
2186   size_t vs_byte_size = vs_word_size * BytesPerWord;
2187   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2188 
2189   // Allocate the meta virtual space and initialize it.
2190   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2191   if (!new_entry->initialize()) {
2192     delete new_entry;
2193     return false;
2194   } else {
2195     assert(new_entry->reserved_words() == vs_word_size,
2196         "Reserved memory size differs from requested memory size");
2197     // ensure lock-free iteration sees fully initialized node
2198     OrderAccess::storestore();
2199     link_vs(new_entry);
2200     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
2201     return true;
2202   }
2203 }
2204 
2205 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2206   if (virtual_space_list() == NULL) {
2207       set_virtual_space_list(new_entry);
2208   } else {
2209     current_virtual_space()->set_next(new_entry);
2210   }
2211   set_current_virtual_space(new_entry);
2212   inc_reserved_words(new_entry->reserved_words());
2213   inc_committed_words(new_entry->committed_words());
2214   inc_virtual_space_count();
2215 #ifdef ASSERT
2216   new_entry->mangle();
2217 #endif
2218   LogTarget(Trace, gc, metaspace) lt;
2219   if (lt.is_enabled()) {
2220     LogStream ls(lt);
2221     VirtualSpaceNode* vsl = current_virtual_space();
2222     ResourceMark rm;
2223     vsl->print_on(&ls);
2224     ls.cr(); // ~LogStream does not autoflush.
2225   }
2226 }
2227 
2228 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2229                                       size_t min_words,
2230                                       size_t preferred_words) {
2231   size_t before = node->committed_words();
2232 
2233   bool result = node->expand_by(min_words, preferred_words);
2234 
2235   size_t after = node->committed_words();
2236 
2237   // after and before can be the same if the memory was pre-committed.
2238   assert(after >= before, "Inconsistency");
2239   inc_committed_words(after - before);
2240 
2241   return result;
2242 }
2243 
2244 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2245   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2246   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2247   assert(min_words <= preferred_words, "Invalid arguments");
2248 
2249   const char* const class_or_not = (is_class() ? "class" : "non-class");
2250 
2251   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2252     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2253               class_or_not);
2254     return  false;
2255   }
2256 
2257   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2258   if (allowed_expansion_words < min_words) {
2259     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2260               class_or_not);
2261     return false;
2262   }
2263 
2264   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2265 
  // Commit more memory from the current virtual space.
2267   bool vs_expanded = expand_node_by(current_virtual_space(),
2268                                     min_words,
2269                                     max_expansion_words);
2270   if (vs_expanded) {
2271      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2272                class_or_not);
2273      return true;
2274   }
2275   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2276             class_or_not);
2277   retire_current_virtual_space();
2278 
2279   // Get another virtual space.
2280   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2281   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2282 
2283   if (create_new_virtual_space(grow_vs_words)) {
2284     if (current_virtual_space()->is_pre_committed()) {
2285       // The memory was pre-committed, so we are done here.
2286       assert(min_words <= current_virtual_space()->committed_words(),
2287           "The new VirtualSpace was pre-committed, so it"
2288           "should be large enough to fit the alloc request.");
2289       return true;
2290     }
2291 
2292     return expand_node_by(current_virtual_space(),
2293                           min_words,
2294                           max_expansion_words);
2295   }
2296 
2297   return false;
2298 }
2299 
2300 // Given a chunk, calculate the largest possible padding space which
2301 // could be required when allocating it.
2302 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2303   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2304   if (chunk_type != HumongousIndex) {
2305     // Normal, non-humongous chunks are allocated at chunk size
2306     // boundaries, so the largest padding space required would be that
2307     // minus the smallest chunk size.
2308     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2309     return chunk_word_size - smallest_chunk_size;
2310   } else {
2311     // Humongous chunks are allocated at smallest-chunksize
2312     // boundaries, so there is no padding required.
2313     return 0;
2314   }
2315 }
2316 
2317 
2318 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2319 
2320   // Allocate a chunk out of the current virtual space.
2321   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2322 
2323   if (next != NULL) {
2324     return next;
2325   }
2326 
2327   // The expand amount is currently only determined by the requested sizes
2328   // and not how much committed memory is left in the current virtual space.
2329 
2330   // We must have enough space for the requested size and any
  // additional required padding chunks.
2332   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2333 
2334   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2335   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2336   if (min_word_size >= preferred_word_size) {
2337     // Can happen when humongous chunks are allocated.
2338     preferred_word_size = min_word_size;
2339   }
2340 
2341   bool expanded = expand_by(min_word_size, preferred_word_size);
2342   if (expanded) {
2343     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2344     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2345   }
2346 
  return next;
2348 }
2349 
2350 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
2351   st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
2352       _virtual_space_count, p2i(_current_virtual_space));
2353   VirtualSpaceListIterator iter(virtual_space_list());
2354   while (iter.repeat()) {
2355     st->cr();
2356     VirtualSpaceNode* node = iter.get_next();
2357     node->print_on(st, scale);
2358   }
2359 }
2360 
2361 void VirtualSpaceList::print_map(outputStream* st) const {
2362   VirtualSpaceNode* list = virtual_space_list();
2363   VirtualSpaceListIterator iter(list);
2364   unsigned i = 0;
2365   while (iter.repeat()) {
2366     st->print_cr("Node %u:", i);
2367     VirtualSpaceNode* node = iter.get_next();
2368     node->print_map(st, this->is_class());
2369     i ++;
2370   }
2371 }
2372 
2373 // MetaspaceGC methods
2374 
2375 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2376 // Within the VM operation after the GC the attempt to allocate the metadata
2377 // should succeed.  If the GC did not free enough space for the metaspace
2378 // allocation, the HWM is increased so that another virtualspace will be
2379 // allocated for the metadata.  With perm gen the increase in the perm
2380 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2381 // metaspace policy uses those as the small and large steps for the HWM.
2382 //
2383 // After the GC the compute_new_size() for MetaspaceGC is called to
2384 // resize the capacity of the metaspaces.  The current implementation
2385 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
2387 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2388 // free space is desirable in the metaspace capacity to decide how much
2389 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2390 // free space is desirable in the metaspace capacity before decreasing
2391 // the HWM.
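
// Illustrative example of the ratio computation in compute_new_size() below
// (numbers are for illustration only): with MinMetaspaceFreeRatio = 40 and
// 60 MB committed after GC, the minimum desired capacity is
// 60 MB / (1 - 0.40) = 100 MB. If capacity_until_GC is below that, the HWM
// is raised; otherwise MaxMetaspaceFreeRatio decides whether to shrink it.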
2392 
2393 // Calculate the amount to increase the high water mark (HWM).
2394 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2395 // another expansion is not requested too soon.  If that is not
2396 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2397 // If that is still not enough, expand by the size of the allocation
2398 // plus some.
2399 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2400   size_t min_delta = MinMetaspaceExpansion;
2401   size_t max_delta = MaxMetaspaceExpansion;
2402   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2403 
2404   if (delta <= min_delta) {
2405     delta = min_delta;
2406   } else if (delta <= max_delta) {
2407     // Don't want to hit the high water mark on the next
2408     // allocation so make the delta greater than just enough
2409     // for this allocation.
2410     delta = max_delta;
2411   } else {
    // This allocation is large but the next ones are probably not,
2413     // so increase by the minimum.
2414     delta = delta + min_delta;
2415   }
2416 
2417   assert_is_aligned(delta, Metaspace::commit_alignment());
2418 
2419   return delta;
2420 }
2421 
2422 size_t MetaspaceGC::capacity_until_GC() {
2423   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2424   assert(value >= MetaspaceSize, "Not initialized properly?");
2425   return value;
2426 }
2427 
2428 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2429   assert_is_aligned(v, Metaspace::commit_alignment());
2430 
2431   intptr_t capacity_until_GC = _capacity_until_GC;
2432   intptr_t new_value = capacity_until_GC + v;
2433 
2434   if (new_value < capacity_until_GC) {
2435     // The addition wrapped around, set new_value to aligned max value.
2436     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2437   }
2438 
2439   intptr_t expected = _capacity_until_GC;
2440   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2441 
2442   if (expected != actual) {
2443     return false;
2444   }
2445 
2446   if (new_cap_until_GC != NULL) {
2447     *new_cap_until_GC = new_value;
2448   }
2449   if (old_cap_until_GC != NULL) {
2450     *old_cap_until_GC = capacity_until_GC;
2451   }
2452   return true;
2453 }
2454 
2455 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2456   assert_is_aligned(v, Metaspace::commit_alignment());
2457 
2458   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2459 }
2460 
2461 void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2463   // we can't do a GC during initialization.
2464   _capacity_until_GC = MaxMetaspaceSize;
2465 }
2466 
2467 void MetaspaceGC::post_initialize() {
2468   // Reset the high-water mark once the VM initialization is done.
2469   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2470 }
2471 
2472 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2473   // Check if the compressed class space is full.
2474   if (is_class && Metaspace::using_class_space()) {
2475     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2476     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2477       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2478                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2479       return false;
2480     }
2481   }
2482 
2483   // Check if the user has imposed a limit on the metaspace memory.
2484   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2485   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2486     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2487               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2488     return false;
2489   }
2490 
2491   return true;
2492 }
2493 
2494 size_t MetaspaceGC::allowed_expansion() {
2495   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2496   size_t capacity_until_gc = capacity_until_GC();
2497 
2498   assert(capacity_until_gc >= committed_bytes,
2499          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2500          capacity_until_gc, committed_bytes);
2501 
2502   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2503   size_t left_until_GC = capacity_until_gc - committed_bytes;
2504   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2505   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2506             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2507             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2508 
2509   return left_to_commit / BytesPerWord;
2510 }
2511 
2512 void MetaspaceGC::compute_new_size() {
2513   assert(_shrink_factor <= 100, "invalid shrink factor");
2514   uint current_shrink_factor = _shrink_factor;
2515   _shrink_factor = 0;
2516 
2517   // Using committed_bytes() for used_after_gc is an overestimation, since the
2518   // chunk free lists are included in committed_bytes() and the memory in an
2519   // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
2521   // not be available for future allocations and the memory is therefore "in use".
2522   // Including the chunk free lists in the definition of "in use" is therefore
2523   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2524   // shrink below committed_bytes() and this has caused serious bugs in the past.
2525   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2526   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2527 
2528   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2529   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2530 
2531   const double min_tmp = used_after_gc / maximum_used_percentage;
2532   size_t minimum_desired_capacity =
2533     (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial metaspace size (MetaspaceSize)
2535   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2536                                   MetaspaceSize);
2537 
2538   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2539   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2540                            minimum_free_percentage, maximum_used_percentage);
2541   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2542 
2543 
2544   size_t shrink_bytes = 0;
2545   if (capacity_until_GC < minimum_desired_capacity) {
2546     // If we have less capacity below the metaspace HWM, then
2547     // increment the HWM.
2548     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2549     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2550     // Don't expand unless it's significant
2551     if (expand_bytes >= MinMetaspaceExpansion) {
2552       size_t new_capacity_until_GC = 0;
2553       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2554       assert(succeeded, "Should always succesfully increment HWM when at safepoint");
2555 
2556       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2557                                                new_capacity_until_GC,
2558                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2559       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2560                                minimum_desired_capacity / (double) K,
2561                                expand_bytes / (double) K,
2562                                MinMetaspaceExpansion / (double) K,
2563                                new_capacity_until_GC / (double) K);
2564     }
2565     return;
2566   }
2567 
2568   // No expansion, now see if we want to shrink
2569   // We would never want to shrink more than this
2570   assert(capacity_until_GC >= minimum_desired_capacity,
2571          SIZE_FORMAT " >= " SIZE_FORMAT,
2572          capacity_until_GC, minimum_desired_capacity);
2573   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2574 
2575   // Should shrinking be considered?
2576   if (MaxMetaspaceFreeRatio < 100) {
2577     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2578     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2579     const double max_tmp = used_after_gc / minimum_used_percentage;
2580     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2581     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2582                                     MetaspaceSize);
2583     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2584                              maximum_free_percentage, minimum_used_percentage);
2585     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2586                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2587 
2588     assert(minimum_desired_capacity <= maximum_desired_capacity,
2589            "sanity check");
2590 
2591     if (capacity_until_GC > maximum_desired_capacity) {
2592       // Capacity too large, compute shrinking size
2593       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace up again for the next phase.  So we
2597       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2598       // on the third call, and 100% by the fourth call.  But if we recompute
2599       // size without shrinking, it goes back to 0%.
2600       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2601 
2602       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2603 
2604       assert(shrink_bytes <= max_shrink_bytes,
2605              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2606              shrink_bytes, max_shrink_bytes);
2607       if (current_shrink_factor == 0) {
2608         _shrink_factor = 10;
2609       } else {
2610         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2611       }
2612       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2613                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2614       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2615                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2616     }
2617   }
2618 
2619   // Don't shrink unless it's significant
2620   if (shrink_bytes >= MinMetaspaceExpansion &&
2621       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2622     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2623     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2624                                              new_capacity_until_GC,
2625                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2626   }
2627 }
2628 
2629 // Metadebug methods
2630 
2631 void Metadebug::init_allocation_fail_alot_count() {
2632   if (MetadataAllocationFailALot) {
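    // Pick a pseudo-random countdown in [1, MetadataAllocationFailALotInterval];
    // test_metadata_failure() decrements it and injects a failure once it reaches zero.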
2633     _allocation_fail_alot_count =
2634       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2635   }
2636 }
2637 
2638 #ifdef ASSERT
2639 bool Metadebug::test_metadata_failure() {
2640   if (MetadataAllocationFailALot &&
2641       Threads::is_vm_complete()) {
2642     if (_allocation_fail_alot_count > 0) {
2643       _allocation_fail_alot_count--;
2644     } else {
2645       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2646       init_allocation_fail_alot_count();
2647       return true;
2648     }
2649   }
2650   return false;
2651 }
2652 #endif
2653 
2654 // ChunkManager methods
2655 size_t ChunkManager::free_chunks_total_words() {
2656   return _free_chunks_total;
2657 }
2658 
2659 size_t ChunkManager::free_chunks_total_bytes() {
2660   return free_chunks_total_words() * BytesPerWord;
2661 }
2662 
2663 // Update internal accounting after a chunk was added
2664 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2665   assert_lock_strong(MetaspaceExpand_lock);
2666   _free_chunks_count ++;
2667   _free_chunks_total += c->word_size();
2668 }
2669 
2670 // Update internal accounting after a chunk was removed
2671 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2672   assert_lock_strong(MetaspaceExpand_lock);
2673   assert(_free_chunks_count >= 1,
2674     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2675   assert(_free_chunks_total >= c->word_size(),
2676     "ChunkManager::_free_chunks_total: about to go negative"
2677      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2678   _free_chunks_count --;
2679   _free_chunks_total -= c->word_size();
2680 }
2681 
2682 size_t ChunkManager::free_chunks_count() {
2683 #ifdef ASSERT
2684   if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
2685     MutexLockerEx cl(MetaspaceExpand_lock,
2686                      Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of _free_chunks_count walks the lists of free chunks
2689     slow_locked_verify_free_chunks_count();
2690   }
2691 #endif
2692   return _free_chunks_count;
2693 }
2694 
2695 ChunkIndex ChunkManager::list_index(size_t size) {
2696   return get_chunk_type_by_size(size, is_class());
2697 }
2698 
2699 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2700   index_bounds_check(index);
2701   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2702   return get_size_for_nonhumongous_chunktype(index, is_class());
2703 }
2704 
2705 void ChunkManager::locked_verify_free_chunks_total() {
2706   assert_lock_strong(MetaspaceExpand_lock);
2707   assert(sum_free_chunks() == _free_chunks_total,
2708          "_free_chunks_total " SIZE_FORMAT " is not the"
2709          " same as sum " SIZE_FORMAT, _free_chunks_total,
2710          sum_free_chunks());
2711 }
2712 
2713 void ChunkManager::verify_free_chunks_total() {
2714   MutexLockerEx cl(MetaspaceExpand_lock,
2715                      Mutex::_no_safepoint_check_flag);
2716   locked_verify_free_chunks_total();
2717 }
2718 
2719 void ChunkManager::locked_verify_free_chunks_count() {
2720   assert_lock_strong(MetaspaceExpand_lock);
2721   assert(sum_free_chunks_count() == _free_chunks_count,
2722          "_free_chunks_count " SIZE_FORMAT " is not the"
2723          " same as sum " SIZE_FORMAT, _free_chunks_count,
2724          sum_free_chunks_count());
2725 }
2726 
2727 void ChunkManager::verify_free_chunks_count() {
2728 #ifdef ASSERT
2729   MutexLockerEx cl(MetaspaceExpand_lock,
2730                      Mutex::_no_safepoint_check_flag);
2731   locked_verify_free_chunks_count();
2732 #endif
2733 }
2734 
2735 void ChunkManager::verify() {
2736   MutexLockerEx cl(MetaspaceExpand_lock,
2737                      Mutex::_no_safepoint_check_flag);
2738   locked_verify();
2739 }
2740 
2741 void ChunkManager::locked_verify() {
2742   locked_verify_free_chunks_count();
2743   locked_verify_free_chunks_total();
2744   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2745     ChunkList* list = free_chunks(i);
2746     if (list != NULL) {
2747       Metachunk* chunk = list->head();
2748       while (chunk) {
2749         DEBUG_ONLY(do_verify_chunk(chunk);)
2750         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2751         chunk = chunk->next();
2752       }
2753     }
2754   }
2755 }
2756 
2757 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2758   assert_lock_strong(MetaspaceExpand_lock);
2759   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2760                 _free_chunks_total, _free_chunks_count);
2761 }
2762 
2763 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2764   assert_lock_strong(MetaspaceExpand_lock);
2765   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2766                 sum_free_chunks(), sum_free_chunks_count());
2767 }
2768 
2769 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2770   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2771          "Bad index: %d", (int)index);
2772 
2773   return &_free_chunks[index];
2774 }
2775 
// The following methods sum the free chunk lists. They are used by printing
// code that also runs in product builds.
2778 size_t ChunkManager::sum_free_chunks() {
2779   assert_lock_strong(MetaspaceExpand_lock);
2780   size_t result = 0;
2781   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2782     ChunkList* list = free_chunks(i);
2783 
2784     if (list == NULL) {
2785       continue;
2786     }
2787 
2788     result = result + list->count() * list->size();
2789   }
2790   result = result + humongous_dictionary()->total_size();
2791   return result;
2792 }
2793 
2794 size_t ChunkManager::sum_free_chunks_count() {
2795   assert_lock_strong(MetaspaceExpand_lock);
2796   size_t count = 0;
2797   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2798     ChunkList* list = free_chunks(i);
2799     if (list == NULL) {
2800       continue;
2801     }
2802     count = count + list->count();
2803   }
2804   count = count + humongous_dictionary()->total_free_blocks();
2805   return count;
2806 }
2807 
2808 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2809   ChunkIndex index = list_index(word_size);
2810   assert(index < HumongousIndex, "No humongous list");
2811   return free_chunks(index);
2812 }
2813 
// Helper for chunk splitting: given a target chunk size and a larger free chunk,
// split the larger chunk into n smaller chunks, at least one of which has the
// target chunk size. The smaller chunks, including the target chunk, are returned
// to the freelist; the pointer to the target chunk is returned. Note that the
// target chunk is expected to be removed from the freelist right away.
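// Illustrative example (chunk sizes are configuration dependent): splitting a
// non-class medium chunk to obtain a specialized chunk places the target chunk at
// the region start and fills the remainder of the region with the largest chunks
// that still satisfy the alignment requirement at each position (typically a few
// more specialized chunks, then small chunks up to the region end).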
2819 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2820   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2821 
2822   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2823   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2824 
2825   MetaWord* const region_start = (MetaWord*)larger_chunk;
2826   const size_t region_word_len = larger_chunk->word_size();
2827   MetaWord* const region_end = region_start + region_word_len;
2828   VirtualSpaceNode* const vsn = larger_chunk->container();
2829   OccupancyMap* const ocmap = vsn->occupancy_map();
2830 
2831   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2832   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2833   // at an address suitable to place the smaller target chunk.
2834   assert_is_aligned(region_start, target_chunk_word_size);
2835 
2836   // Remove old chunk.
2837   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2838   larger_chunk->remove_sentinel();
2839 
2840   // Prevent access to the old chunk from here on.
2841   larger_chunk = NULL;
2842   // ... and wipe it.
2843   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2844 
2845   // In its place create first the target chunk...
2846   MetaWord* p = region_start;
2847   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2848   assert(target_chunk == (Metachunk*)p, "Sanity");
2849   target_chunk->set_origin(origin_split);
2850 
2851   // Note: we do not need to mark its start in the occupancy map
2852   // because it coincides with the old chunk start.
2853 
2854   // Mark chunk as free and return to the freelist.
2855   do_update_in_use_info_for_chunk(target_chunk, false);
2856   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2857 
2858   // This chunk should now be valid and can be verified.
2859   DEBUG_ONLY(do_verify_chunk(target_chunk));
2860 
2861   // In the remaining space create the remainder chunks.
2862   p += target_chunk->word_size();
2863   assert(p < region_end, "Sanity");
2864 
2865   while (p < region_end) {
2866 
2867     // Find the largest chunk size which fits the alignment requirements at address p.
2868     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2869     size_t this_chunk_word_size = 0;
2870     for(;;) {
2871       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2872       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2873         break;
2874       } else {
2875         this_chunk_index = prev_chunk_index(this_chunk_index);
2876         assert(this_chunk_index >= target_chunk_index, "Sanity");
2877       }
2878     }
2879 
2880     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2881     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2882     assert(p + this_chunk_word_size <= region_end, "Sanity");
2883 
2884     // Create splitting chunk.
2885     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2886     assert(this_chunk == (Metachunk*)p, "Sanity");
2887     this_chunk->set_origin(origin_split);
2888     ocmap->set_chunk_starts_at_address(p, true);
2889     do_update_in_use_info_for_chunk(this_chunk, false);
2890 
2891     // This chunk should be valid and can be verified.
2892     DEBUG_ONLY(do_verify_chunk(this_chunk));
2893 
2894     // Return this chunk to freelist and correct counter.
2895     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2896     _free_chunks_count ++;
2897 
2898     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2899       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2900       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2901       p2i(region_start), p2i(region_end));
2902 
2903     p += this_chunk_word_size;
2904 
2905   }
2906 
2907   return target_chunk;
2908 }
2909 
2910 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2911   assert_lock_strong(MetaspaceExpand_lock);
2912 
2913   slow_locked_verify();
2914 
2915   Metachunk* chunk = NULL;
2916   bool we_did_split_a_chunk = false;
2917 
2918   if (list_index(word_size) != HumongousIndex) {
2919 
2920     ChunkList* free_list = find_free_chunks_list(word_size);
2921     assert(free_list != NULL, "Sanity check");
2922 
2923     chunk = free_list->head();
2924 
2925     if (chunk == NULL) {
      // No free chunk of the requested size is available; if a larger free chunk
      // exists, split it into smaller chunks. This is the counterpart of the
      // coalescing done when chunks are returned.
2928 
2929       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2930 
2931       // Is there a larger chunk we could split?
2932       Metachunk* larger_chunk = NULL;
2933       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2934       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2935         larger_chunk = free_chunks(larger_chunk_index)->head();
2936         if (larger_chunk == NULL) {
2937           larger_chunk_index = next_chunk_index(larger_chunk_index);
2938         }
2939       }
2940 
2941       if (larger_chunk != NULL) {
2942         assert(larger_chunk->word_size() > word_size, "Sanity");
2943         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2944 
        // We found a larger chunk. Let's split it up:
2946         // - remove old chunk
2947         // - in its place, create new smaller chunks, with at least one chunk
2948         //   being of target size, the others sized as large as possible. This
2949         //   is to make sure the resulting chunks are "as coalesced as possible"
2950         //   (similar to VirtualSpaceNode::retire()).
2951         // Note: during this operation both ChunkManager and VirtualSpaceNode
2952         //  are temporarily invalid, so be careful with asserts.
2953 
2954         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2955            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2956           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2957           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2958 
2959         chunk = split_chunk(word_size, larger_chunk);
2960 
2961         // This should have worked.
2962         assert(chunk != NULL, "Sanity");
2963         assert(chunk->word_size() == word_size, "Sanity");
2964         assert(chunk->is_tagged_free(), "Sanity");
2965 
2966         we_did_split_a_chunk = true;
2967 
2968       }
2969     }
2970 
2971     if (chunk == NULL) {
2972       return NULL;
2973     }
2974 
2975     // Remove the chunk as the head of the list.
2976     free_list->remove_chunk(chunk);
2977 
2978     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2979                                        p2i(free_list), free_list->count());
2980 
2981   } else {
2982     chunk = humongous_dictionary()->get_chunk(word_size);
2983 
2984     if (chunk == NULL) {
2985       return NULL;
2986     }
2987 
2988     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2989                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2990   }
2991 
2992   // Chunk has been removed from the chunk manager; update counters.
2993   account_for_removed_chunk(chunk);
2994   do_update_in_use_info_for_chunk(chunk, true);
2995   chunk->container()->inc_container_count();
2996   chunk->inc_use_count();
2997 
2998   // Remove it from the links to this freelist
2999   chunk->set_next(NULL);
3000   chunk->set_prev(NULL);
3001 
3002   // Run some verifications (some more if we did a chunk split)
3003 #ifdef ASSERT
3004   if (VerifyMetaspace) {
3005     locked_verify();
3006     VirtualSpaceNode* const vsn = chunk->container();
3007     vsn->verify();
3008     if (we_did_split_a_chunk) {
3009       vsn->verify_free_chunks_are_ideally_merged();
3010     }
3011   }
3012 #endif
3013 
3014   return chunk;
3015 }
3016 
3017 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
3018   assert_lock_strong(MetaspaceExpand_lock);
3019   slow_locked_verify();
3020 
3021   // Take from the beginning of the list
3022   Metachunk* chunk = free_chunks_get(word_size);
3023   if (chunk == NULL) {
3024     return NULL;
3025   }
3026 
3027   assert((word_size <= chunk->word_size()) ||
3028          (list_index(chunk->word_size()) == HumongousIndex),
3029          "Non-humongous variable sized chunk");
3030   LogTarget(Debug, gc, metaspace, freelist) lt;
3031   if (lt.is_enabled()) {
3032     size_t list_count;
3033     if (list_index(word_size) < HumongousIndex) {
3034       ChunkList* list = find_free_chunks_list(word_size);
3035       list_count = list->count();
3036     } else {
3037       list_count = humongous_dictionary()->total_count();
3038     }
3039     LogStream ls(lt);
3040     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3041              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3042     ResourceMark rm;
3043     locked_print_free_chunks(&ls);
3044     ls.cr(); // ~LogStream does not autoflush.
3045   }
3046 
3047   return chunk;
3048 }
3049 
3050 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3051   assert_lock_strong(MetaspaceExpand_lock);
  assert(chunk != NULL, "Expected chunk.");
  DEBUG_ONLY(do_verify_chunk(chunk);)
  assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3055   assert(chunk->container() != NULL, "Container should have been set.");
3056   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3057   index_bounds_check(index);
3058 
3059   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3060   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3061   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3062   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3063 
3064   if (index != HumongousIndex) {
3065     // Return non-humongous chunk to freelist.
3066     ChunkList* list = free_chunks(index);
3067     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3068     list->return_chunk_at_head(chunk);
3069     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3070         chunk_size_name(index), p2i(chunk));
3071   } else {
3072     // Return humongous chunk to dictionary.
3073     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3074     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3075            "Humongous chunk has wrong alignment.");
3076     _humongous_dictionary.return_chunk(chunk);
3077     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3078         chunk_size_name(index), p2i(chunk), chunk->word_size());
3079   }
3080   chunk->container()->dec_container_count();
3081   do_update_in_use_info_for_chunk(chunk, false);
3082 
3083   // Chunk has been added; update counters.
3084   account_for_added_chunk(chunk);
3085 
  // Attempt to coalesce the returned chunk with its neighboring chunks:
  // if this chunk is small or specialized, attempt to coalesce to a medium chunk.
3088   if (index == SmallIndex || index == SpecializedIndex) {
3089     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
      // This did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3091       if (index == SpecializedIndex) {
3092         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3093           // give up.
3094         }
3095       }
3096     }
3097   }
3098 
3099 }
3100 
3101 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3102   index_bounds_check(index);
3103   if (chunks == NULL) {
3104     return;
3105   }
3106   LogTarget(Trace, gc, metaspace, freelist) log;
3107   if (log.is_enabled()) { // tracing
3108     log.print("returning list of %s chunks...", chunk_size_name(index));
3109   }
3110   unsigned num_chunks_returned = 0;
3111   size_t size_chunks_returned = 0;
3112   Metachunk* cur = chunks;
3113   while (cur != NULL) {
3114     // Capture the next link before it is changed
3115     // by the call to return_chunk_at_head();
3116     Metachunk* next = cur->next();
3117     if (log.is_enabled()) { // tracing
3118       num_chunks_returned ++;
3119       size_chunks_returned += cur->word_size();
3120     }
3121     return_single_chunk(index, cur);
3122     cur = next;
3123   }
3124   if (log.is_enabled()) { // tracing
3125     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3126         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3127     if (index != HumongousIndex) {
3128       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3129     } else {
3130       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3131     }
3132   }
3133 }
3134 
3135 void ChunkManager::print_on(outputStream* out) const {
3136   _humongous_dictionary.report_statistics(out);
3137 }
3138 
3139 void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
3140   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3141   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3142     out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
3143   }
3144 }
3145 
3146 // SpaceManager methods
3147 
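// Round a requested initial chunk word size up to the next fixed chunk size
// (specialized, small or medium); larger requests are returned unchanged and
// end up being served by a humongous chunk.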
3148 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3149   size_t chunk_sizes[] = {
3150       specialized_chunk_size(is_class_space),
3151       small_chunk_size(is_class_space),
3152       medium_chunk_size(is_class_space)
3153   };
3154 
3155   // Adjust up to one of the fixed chunk sizes ...
3156   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3157     if (requested <= chunk_sizes[i]) {
3158       return chunk_sizes[i];
3159     }
3160   }
3161 
3162   // ... or return the size as a humongous chunk.
3163   return requested;
3164 }
3165 
3166 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3167   return adjust_initial_chunk_size(requested, is_class());
3168 }
3169 
3170 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3171   size_t requested;
3172 
3173   if (is_class()) {
3174     switch (type) {
3175     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3176     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3177     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3178     default:                                 requested = ClassSmallChunk; break;
3179     }
3180   } else {
3181     switch (type) {
3182     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3183     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3184     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3185     default:                                 requested = SmallChunk; break;
3186     }
3187   }
3188 
3189   // Adjust to one of the fixed chunk sizes (unless humongous)
3190   const size_t adjusted = adjust_initial_chunk_size(requested);
3191 
3192   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3193          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3194 
3195   return adjusted;
3196 }
3197 
3198 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3199   size_t count = 0;
3200   Metachunk* chunk = chunks_in_use(i);
3201   while (chunk != NULL) {
3202     count++;
3203     chunk = chunk->next();
3204   }
3205   return count;
3206 }
3207 
3208 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3209 
3210   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3211     Metachunk* chunk = chunks_in_use(i);
3212     st->print("SpaceManager: %s " PTR_FORMAT,
3213                  chunk_size_name(i), p2i(chunk));
3214     if (chunk != NULL) {
3215       st->print_cr(" free " SIZE_FORMAT,
3216                    chunk->free_word_size());
3217     } else {
3218       st->cr();
3219     }
3220   }
3221 
3222   chunk_manager()->locked_print_free_chunks(st);
3223   chunk_manager()->locked_print_sum_free_chunks(st);
3224 }
3225 
3226 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3227 
3228   // Decide between a small chunk and a medium chunk.  Up to
3229   // _small_chunk_limit small chunks can be allocated.
3230   // After that a medium chunk is preferred.
3231   size_t chunk_word_size;
3232 
  // Special case for anonymous metadata space.
  // Anonymous metadata space is usually small, with the majority of allocations in the
  // 1K - 2K range and rarely around 4K (64-bit JVM).
  // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
  // allocations in SpecializedChunk up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
  // reduces space waste from 60+% to around 30%.
3239   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3240       _mdtype == Metaspace::NonClassType &&
3241       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3242       word_size + Metachunk::overhead() <= SpecializedChunk) {
3243     return SpecializedChunk;
3244   }
3245 
3246   if (chunks_in_use(MediumIndex) == NULL &&
3247       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3248     chunk_word_size = (size_t) small_chunk_size();
3249     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3250       chunk_word_size = medium_chunk_size();
3251     }
3252   } else {
3253     chunk_word_size = medium_chunk_size();
3254   }
3255 
  // Might still need a humongous chunk.  Enforce
  // humongous allocation sizes to be aligned up to
  // the smallest chunk size.
3259   size_t if_humongous_sized_chunk =
3260     align_up(word_size + Metachunk::overhead(),
3261                   smallest_chunk_size());
3262   chunk_word_size =
3263     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3264 
3265   assert(!SpaceManager::is_humongous(word_size) ||
3266          chunk_word_size == if_humongous_sized_chunk,
3267          "Size calculation is wrong, word_size " SIZE_FORMAT
3268          " chunk_word_size " SIZE_FORMAT,
3269          word_size, chunk_word_size);
3270   Log(gc, metaspace, alloc) log;
3271   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3272     log.debug("Metadata humongous allocation:");
3273     log.debug("  word_size " PTR_FORMAT, word_size);
3274     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3275     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3276   }
3277   return chunk_word_size;
3278 }
3279 
3280 void SpaceManager::track_metaspace_memory_usage() {
3281   if (is_init_completed()) {
3282     if (is_class()) {
3283       MemoryService::track_compressed_class_memory_usage();
3284     }
3285     MemoryService::track_metaspace_memory_usage();
3286   }
3287 }
3288 
3289 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3290   assert_lock_strong(_lock);
3291   assert(vs_list()->current_virtual_space() != NULL,
3292          "Should have been set");
3293   assert(current_chunk() == NULL ||
3294          current_chunk()->allocate(word_size) == NULL,
3295          "Don't need to expand");
3296   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
3297 
3298   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3299     size_t words_left = 0;
3300     size_t words_used = 0;
3301     if (current_chunk() != NULL) {
3302       words_left = current_chunk()->free_word_size();
3303       words_used = current_chunk()->used_word_size();
3304     }
3305     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3306                                        word_size, words_used, words_left);
3307   }
3308 
3309   // Get another chunk
3310   size_t chunk_word_size = calc_chunk_size(word_size);
3311   Metachunk* next = get_new_chunk(chunk_word_size);
3312 
3313   MetaWord* mem = NULL;
3314 
3315   // If a chunk was available, add it to the in-use chunk list
3316   // and do an allocation from it.
3317   if (next != NULL) {
3318     // Add to this manager's list of chunks in use.
3319     // If the new chunk is humongous, it was created to serve a single large allocation. In that
3320     // case it usually makes no sense to make it the current chunk, since the next allocation would
3321     // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
3322     // good chunk which could be used for more normal allocations.
3323     bool make_current = true;
3324     if (next->get_chunk_type() == HumongousIndex &&
3325         current_chunk() != NULL) {
3326       make_current = false;
3327     }
3328     add_chunk(next, make_current);
3329     mem = next->allocate(word_size);
3330   }
3331 
3332   // Track metaspace memory usage statistic.
3333   track_metaspace_memory_usage();
3334 
3335   return mem;
3336 }
3337 
3338 void SpaceManager::print_on(outputStream* st) const {
3339   SpaceManagerStatistics stat;
3340   add_to_statistics(&stat); // will lock _lock.
3341   stat.print_on(st, 1*K, false);
3342 }
3343 
3344 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3345                            Metaspace::MetaspaceType space_type,
3346                            Mutex* lock) :
3347   _mdtype(mdtype),
3348   _space_type(space_type),
3349   _capacity_words(0),
3350   _used_words(0),
3351   _overhead_words(0),
3352   _block_freelists(NULL),
3353   _lock(lock)
3354 {
3355   initialize();
3356 }
3357 
3358 void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {
3359 
3360   assert_lock_strong(MetaspaceExpand_lock);
3361 
3362   _capacity_words += new_chunk->word_size();
3363   _overhead_words += Metachunk::overhead();
3364 
3365   // Adjust global counters:
3366   MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
3367   MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
3368 }
3369 
3370 void SpaceManager::account_for_allocation(size_t words) {
  // Note: we should be locked with the ClassLoaderData-specific metaspace lock.
3372   // We may or may not be locked with the global metaspace expansion lock.
3373   assert_lock_strong(lock());
3374 
3375   // Add to the per SpaceManager totals. This can be done non-atomically.
3376   _used_words += words;
3377 
3378   // Adjust global counters. This will be done atomically.
3379   MetaspaceUtils::inc_used(mdtype(), words);
3380 }
3381 
3382 void SpaceManager::account_for_spacemanager_death() {
3383 
3384   assert_lock_strong(MetaspaceExpand_lock);
3385 
3386   MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
3387   MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
3388   MetaspaceUtils::dec_used(mdtype(), _used_words);
3389 }
3390 
3391 void SpaceManager::initialize() {
3392   Metadebug::init_allocation_fail_alot_count();
3393   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3394     _chunks_in_use[i] = NULL;
3395   }
3396   _current_chunk = NULL;
3397   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3398 }
3399 
3400 SpaceManager::~SpaceManager() {
3401 
  // This acquires this->_lock, which cannot be done while holding MetaspaceExpand_lock.
3403   DEBUG_ONLY(verify_metrics());
3404 
3405   MutexLockerEx fcl(MetaspaceExpand_lock,
3406                     Mutex::_no_safepoint_check_flag);
3407 
3408   chunk_manager()->slow_locked_verify();
3409 
3410   account_for_spacemanager_death();
3411 
3412   Log(gc, metaspace, freelist) log;
3413   if (log.is_trace()) {
3414     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3415     ResourceMark rm;
3416     LogStream ls(log.trace());
3417     locked_print_chunks_in_use_on(&ls);
3418     if (block_freelists() != NULL) {
3419       block_freelists()->print_on(&ls);
3420     }
3421     ls.cr(); // ~LogStream does not autoflush.
3422   }
3423 
3424   // Add all the chunks in use by this space manager
3425   // to the global list of free chunks.
3426 
3427   // Follow each list of chunks-in-use and add them to the
3428   // free lists.  Each list is NULL terminated.
3429 
3430   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3431     Metachunk* chunks = chunks_in_use(i);
3432     chunk_manager()->return_chunk_list(i, chunks);
3433     set_chunks_in_use(i, NULL);
3434   }
3435 
3436   chunk_manager()->slow_locked_verify();
3437 
3438   if (_block_freelists != NULL) {
3439     delete _block_freelists;
3440   }
3441 }
3442 
3443 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3444   assert_lock_strong(lock());
  // Allocations and deallocations are done in units of raw_word_size.
3446   size_t raw_word_size = get_allocation_word_size(word_size);
3447   // Lazily create a block_freelist
3448   if (block_freelists() == NULL) {
3449     _block_freelists = new BlockFreelist();
3450   }
3451   block_freelists()->return_block(p, raw_word_size);
3452   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_deallocs));
3453 }
3454 
3455 // Adds a chunk to the list of chunks in use.
3456 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3457 
3458   assert_lock_strong(_lock);
3459   assert(new_chunk != NULL, "Should not be NULL");
3460   assert(new_chunk->next() == NULL, "Should not be on a list");
3461 
3462   new_chunk->reset_empty();
3463 
  // Find the correct list and set the current
  // chunk for that list.
3466   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3467 
3468   if (make_current) {
3469     // If we are to make the chunk current, retire the old current chunk and replace
3470     // it with the new chunk.
3471     retire_current_chunk();
3472     set_current_chunk(new_chunk);
3473   }
3474 
3475   // Add the new chunk at the head of its respective chunk list.
3476   new_chunk->set_next(chunks_in_use(index));
3477   set_chunks_in_use(index, new_chunk);
3478 
3479   // Adjust counters.
3480   account_for_new_chunk(new_chunk);
3481 
3482   assert(new_chunk->is_empty(), "Not ready for reuse");
3483   Log(gc, metaspace, freelist) log;
3484   if (log.is_trace()) {
3485     log.trace("SpaceManager::added chunk: ");
3486     ResourceMark rm;
3487     LogStream ls(log.trace());
3488     new_chunk->print_on(&ls);
3489     chunk_manager()->locked_print_free_chunks(&ls);
3490     ls.cr(); // ~LogStream does not autoflush.
3491   }
3492 }
3493 
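// Retire the current chunk: if it still has a usable amount of free space, "allocate"
// that remainder and immediately hand it to the block freelist via deallocate(), so
// the space can still serve future small-block allocations.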
3494 void SpaceManager::retire_current_chunk() {
3495   if (current_chunk() != NULL) {
3496     size_t remaining_words = current_chunk()->free_word_size();
3497     if (remaining_words >= SmallBlocks::small_block_min_size()) {
3498       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3499       deallocate(ptr, remaining_words);
3500       account_for_allocation(remaining_words);
3501     }
3502   }
3503 }
3504 
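// Get a new chunk: first try the global chunk freelist; if that yields nothing,
// carve a new chunk out of the virtual space list.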
3505 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3506   // Get a chunk from the chunk freelist
3507   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3508 
3509   if (next == NULL) {
3510     next = vs_list()->get_new_chunk(chunk_word_size,
3511                                     medium_chunk_bunch());
3512   }
3513 
3514   Log(gc, metaspace, alloc) log;
3515   if (log.is_debug() && next != NULL &&
3516       SpaceManager::is_humongous(next->word_size())) {
3517     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3518   }
3519 
3520   return next;
3521 }
3522 
3523 MetaWord* SpaceManager::allocate(size_t word_size) {
3524   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3525   size_t raw_word_size = get_allocation_word_size(word_size);
3526   BlockFreelist* fl =  block_freelists();
3527   MetaWord* p = NULL;
3528 
3529   DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());
3530 
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
3536   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3537     p = fl->get_block(raw_word_size);
3538     if (p != NULL) {
3539       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
3540     }
3541   }
3542   if (p == NULL) {
3543     p = allocate_work(raw_word_size);
3544   }
3545 
3546   return p;
3547 }
3548 
// Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks).
3551 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3552   assert_lock_strong(lock());
3553 #ifdef ASSERT
3554   if (Metadebug::test_metadata_failure()) {
3555     return NULL;
3556   }
3557 #endif
3558   // Is there space in the current chunk?
3559   MetaWord* result = NULL;
3560 
3561   if (current_chunk() != NULL) {
3562     result = current_chunk()->allocate(word_size);
3563   }
3564 
3565   if (result == NULL) {
3566     result = grow_and_allocate(word_size);
3567   }
3568 
3569   if (result != NULL) {
3570     account_for_allocation(word_size);
3571     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3572            "Head of the list is being allocated");
3573   }
3574 
3575   return result;
3576 }
3577 
3578 void SpaceManager::verify() {
3579   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3580     Metachunk* curr = chunks_in_use(i);
3581     while (curr != NULL) {
3582       DEBUG_ONLY(do_verify_chunk(curr);)
3583       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3584       curr = curr->next();
3585     }
3586   }
3587 }
3588 
3589 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3590   assert(is_humongous(chunk->word_size()) ||
3591          chunk->word_size() == medium_chunk_size() ||
3592          chunk->word_size() == small_chunk_size() ||
3593          chunk->word_size() == specialized_chunk_size(),
3594          "Chunk size is wrong");
3595   return;
3596 }
3597 
3598 void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
3599   assert_lock_strong(lock());
3600   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3601     UsedChunksStatistics& chunk_stat = out->chunk_stats(i);
3602     Metachunk* chunk = chunks_in_use(i);
3603     while (chunk != NULL) {
3604       chunk_stat.add_num(1);
3605       chunk_stat.add_cap(chunk->word_size());
3606       chunk_stat.add_overhead(Metachunk::overhead());
3607       chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
3608       if (chunk != current_chunk()) {
3609         chunk_stat.add_waste(chunk->free_word_size());
3610       } else {
3611         chunk_stat.add_free(chunk->free_word_size());
3612       }
3613       chunk = chunk->next();
3614     }
3615   }
3616   if (block_freelists() != NULL) {
3617     out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
3618   }
3619 }
3620 
3621 void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
3622   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3623   add_to_statistics_locked(out);
3624 }
3625 
3626 #ifdef ASSERT
3627 void SpaceManager::verify_metrics_locked() const {
3628   assert_lock_strong(lock());
3629 
3630   SpaceManagerStatistics stat;
3631   add_to_statistics_locked(&stat);
3632 
3633   UsedChunksStatistics chunk_stats = stat.totals();
3634 
3635   DEBUG_ONLY(chunk_stats.check_sanity());
3636 
3637   assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
3638   assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
3639   assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
3640 }
3641 
3642 void SpaceManager::verify_metrics() const {
3643   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3644   verify_metrics_locked();
3645 }
3646 #endif // ASSERT
3647 
3648 
3649 
3650 // MetaspaceUtils
3651 size_t MetaspaceUtils::_capacity_words [Metaspace:: MetadataTypeCount] = {0, 0};
3652 size_t MetaspaceUtils::_overhead_words [Metaspace:: MetadataTypeCount] = {0, 0};
3653 volatile size_t MetaspaceUtils::_used_words [Metaspace:: MetadataTypeCount] = {0, 0};
3654 
3655 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
3656 // output will be the accumulated values for all live metaspaces.
3657 // Note: method does not do any locking.
3658 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
3659   out->reset();
3660   ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      msp->add_to_statistics(out);
    }
  }
3667 }
3668 
3669 size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
3670   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3671   return list == NULL ? 0 : list->free_bytes();
3672 }
3673 
3674 size_t MetaspaceUtils::free_in_vs_bytes() {
3675   return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
3676 }
3677 
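// Helpers for the global usage counters: the capacity and overhead counters are only
// modified under MetaspaceExpand_lock and can be updated non-atomically, while the
// used counter is updated from several contexts (see SpaceManager::account_for_allocation)
// and therefore uses atomic updates.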
3678 static void inc_stat_nonatomically(size_t* pstat, size_t words) {
3679   assert_lock_strong(MetaspaceExpand_lock);
3680   (*pstat) += words;
3681 }
3682 
3683 static void dec_stat_nonatomically(size_t* pstat, size_t words) {
3684   assert_lock_strong(MetaspaceExpand_lock);
3685   const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
3689   *pstat = size_now - words;
3690 }
3691 
3692 static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
3693   Atomic::add(words, pstat);
3694 }
3695 
3696 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
3697   const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
3701   Atomic::sub(words, pstat);
3702 }
3703 
3704 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3705   dec_stat_nonatomically(&_capacity_words[mdtype], words);
3706 }
3707 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3708   inc_stat_nonatomically(&_capacity_words[mdtype], words);
3709 }
3710 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3711   dec_stat_atomically(&_used_words[mdtype], words);
3712 }
3713 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3714   inc_stat_atomically(&_used_words[mdtype], words);
3715 }
3716 void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
3717   dec_stat_nonatomically(&_overhead_words[mdtype], words);
3718 }
3719 void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
3720   inc_stat_nonatomically(&_overhead_words[mdtype], words);
3721 }
3722 
3723 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3724   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3725   return list == NULL ? 0 : list->reserved_bytes();
3726 }
3727 
3728 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3729   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3730   return list == NULL ? 0 : list->committed_bytes();
3731 }
3732 
3733 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3734 
3735 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3736   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3737   if (chunk_manager == NULL) {
3738     return 0;
3739   }
3740   chunk_manager->slow_verify();
3741   return chunk_manager->free_chunks_total_words();
3742 }
3743 
3744 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3745   return free_chunks_total_words(mdtype) * BytesPerWord;
3746 }
3747 
3748 size_t MetaspaceUtils::free_chunks_total_words() {
3749   return free_chunks_total_words(Metaspace::ClassType) +
3750          free_chunks_total_words(Metaspace::NonClassType);
3751 }
3752 
3753 size_t MetaspaceUtils::free_chunks_total_bytes() {
3754   return free_chunks_total_words() * BytesPerWord;
3755 }
3756 
3757 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3758   return Metaspace::get_chunk_manager(mdtype) != NULL;
3759 }
3760 
3761 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3762   if (!has_chunk_free_list(mdtype)) {
3763     return MetaspaceChunkFreeListSummary();
3764   }
3765 
3766   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3767   return cm->chunk_free_list_summary();
3768 }
3769 
3770 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3771   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3772                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3773 }
3774 
3775 void MetaspaceUtils::print_on(outputStream* out) {
3776   Metaspace::MetadataType nct = Metaspace::NonClassType;
3777 
3778   out->print_cr(" Metaspace       "
3779                 "used "      SIZE_FORMAT "K, "
3780                 "capacity "  SIZE_FORMAT "K, "
3781                 "committed " SIZE_FORMAT "K, "
3782                 "reserved "  SIZE_FORMAT "K",
3783                 used_bytes()/K,
3784                 capacity_bytes()/K,
3785                 committed_bytes()/K,
3786                 reserved_bytes()/K);
3787 
3788   if (Metaspace::using_class_space()) {
3789     Metaspace::MetadataType ct = Metaspace::ClassType;
3790     out->print_cr("  class space    "
3791                   "used "      SIZE_FORMAT "K, "
3792                   "capacity "  SIZE_FORMAT "K, "
3793                   "committed " SIZE_FORMAT "K, "
3794                   "reserved "  SIZE_FORMAT "K",
3795                   used_bytes(ct)/K,
3796                   capacity_bytes(ct)/K,
3797                   committed_bytes(ct)/K,
3798                   reserved_bytes(ct)/K);
3799   }
3800 }
3801 
3802 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
3803 private:
3804   outputStream* const _out;
3805   const size_t        _scale;
3806   const bool          _do_print;
3807   const bool          _break_down_by_chunktype;
3808 
3809 public:
3810 
3811   uintx                           _num_loaders;
3812   ClassLoaderMetaspaceStatistics  _stats_total;
3813 
3814   uintx                           _num_loaders_by_spacetype [Metaspace::MetaspaceTypeCount];
3815   ClassLoaderMetaspaceStatistics  _stats_by_spacetype [Metaspace::MetaspaceTypeCount];
3816 
3817 public:
3818   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale, bool do_print, bool break_down_by_chunktype)
3819     : _out(out), _scale(scale), _do_print(do_print), _break_down_by_chunktype(break_down_by_chunktype)
3820     , _num_loaders(0)
3821   {
3822     memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
3823   }
3824 
3825   void do_cld(ClassLoaderData* cld) {
3826 
3827     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3828 
3829     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
3830     if (msp == NULL) {
3831       return;
3832     }
3833 
3834     // Collect statistics for this class loader metaspace
3835     ClassLoaderMetaspaceStatistics this_cld_stat;
3836     msp->add_to_statistics(&this_cld_stat);
3837 
3838     // And add it to the running totals
3839     _stats_total.add(this_cld_stat);
3840     _num_loaders ++;
3841     _stats_by_spacetype[msp->space_type()].add(this_cld_stat);
3842     _num_loaders_by_spacetype[msp->space_type()] ++;
3843 
3844     // Optionally, print.
3845     if (_do_print) {
3846 
3847       _out->print(UINTX_FORMAT_W(4) ": ", _num_loaders);
3848 
3849       if (cld->is_anonymous()) {
3850         _out->print("ClassLoaderData " PTR_FORMAT " for anonymous class", p2i(cld));
3851       } else {
3852         ResourceMark rm;
3853         _out->print("ClassLoaderData " PTR_FORMAT " for %s", p2i(cld), cld->loader_name());
3854       }
3855 
3856       if (cld->is_unloading()) {
3857         _out->print(" (unloading)");
3858       }
3859 
3860       this_cld_stat.print_on(_out, _scale, _break_down_by_chunktype);
3861       _out->cr();
3862 
3863     }
3864 
3865   } // do_cld
3866 
3867 };
3868 
3869 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
3870   const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3871   const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
3872   {
3873     if (Metaspace::using_class_space()) {
3874       out->print("  Non-class space:  ");
3875     }
3876     print_scaled_words(out, reserved_nonclass_words, scale, 7);
3877     out->print(" reserved, ");
3878     print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
3879     out->print_cr(" committed ");
3880 
3881     if (Metaspace::using_class_space()) {
3882       const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3883       const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
3884       out->print("      Class space:  ");
3885       print_scaled_words(out, reserved_class_words, scale, 7);
3886       out->print(" reserved, ");
3887       print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
3888       out->print_cr(" committed ");
3889 
3890       const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
3891       const size_t committed_words = committed_nonclass_words + committed_class_words;
3892       out->print("             Both:  ");
3893       print_scaled_words(out, reserved_words, scale, 7);
3894       out->print(" reserved, ");
3895       print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
3896       out->print_cr(" committed ");
3897     }
3898   }
3899 }
3900 
// This will print out a basic metaspace usage report, but
// unlike print_report() it is guaranteed not to lock or to walk the CLDG.
3903 void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
3904 
3905   out->cr();
3906   out->print_cr("Usage:");
3907 
3908   if (Metaspace::using_class_space()) {
3909     out->print("  Non-class:  ");
3910   }
3911 
3912   // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
3913   // MetaspaceUtils.
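  // Note: capacity = used + overhead + free + waste, so free+waste falls out as
  // capacity - overhead - used.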
3914   const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
3915   const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
3916   const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
3917   const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
3918 
3919   print_scaled_words(out, cap_nc, scale, 5);
3920   out->print(" capacity, ");
3921   print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
3922   out->print(" used, ");
3923   print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
3924   out->print(" free+waste, ");
3925   print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
3926   out->print(" overhead. ");
3927   out->cr();
3928 
3929   if (Metaspace::using_class_space()) {
3930     const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
3931     const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
3932     const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
3933     const size_t free_and_waste_c = cap_c - overhead_c - used_c;
3934     out->print("      Class:  ");
3935     print_scaled_words(out, cap_c, scale, 5);
3936     out->print(" capacity, ");
3937     print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
3938     out->print(" used, ");
3939     print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
3940     out->print(" free+waste, ");
3941     print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
3942     out->print(" overhead. ");
3943     out->cr();
3944 
3945     out->print("       Both:  ");
3946     const size_t cap = cap_nc + cap_c;
3947 
3948     print_scaled_words(out, cap, scale, 5);
3949     out->print(" capacity, ");
3950     print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
3951     out->print(" used, ");
3952     print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
3953     out->print(" free+waste, ");
3954     print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
3955     out->print(" overhead. ");
3956     out->cr();
3957   }
3958 
3959   out->cr();
3960   out->print_cr("Virtual space:");
3961 
3962   print_vs(out, scale);
3963 
3964   out->cr();
3965   out->print_cr("Chunk freelists:");
3966 
3967   if (Metaspace::using_class_space()) {
3968     out->print("   Non-Class:  ");
3969   }
3970   print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3971   out->cr();
3972   if (Metaspace::using_class_space()) {
3973     out->print("       Class:  ");
3974     print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words(), scale);
3975     out->cr();
3976     out->print("        Both:  ");
3977     print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_words() +
3978                               Metaspace::chunk_manager_metadata()->free_chunks_total_words(), scale);
3979     out->cr();
3980   }
3981   out->cr();
3982 
3983 }
3984 
3985 void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
3986 
3987   const bool print_loaders = (flags & rf_show_loaders) > 0;
3988   const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
3989   const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
3990 
3991   // Some report options require walking the class loader data graph.
3992   PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_by_chunktype);
3993   if (print_loaders) {
3994     out->cr();
3995     out->print_cr("Usage per loader:");
3996     out->cr();
3997   }
3998 
3999   ClassLoaderDataGraph::cld_do(&cl); // collect data and optionally print
4000 
4001   // Print totals, broken up by space type.
4002   if (print_by_spacetype) {
4003     out->cr();
4004     out->print_cr("Usage per space type:");
4005     out->cr();
4006     for (int space_type = (int)Metaspace::ZeroMetaspaceType;
4007          space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
4008     {
4009       uintx num = cl._num_loaders_by_spacetype[space_type];
4010       out->print("%s (" UINTX_FORMAT " loader%s)%c",
4011         space_type_name((Metaspace::MetaspaceType)space_type),
4012         num, (num == 1 ? "" : "s"), (num > 0 ? ':' : '.'));
4013       if (num > 0) {
4014         cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
4015       }
4016       out->cr();
4017     }
4018   }
4019 
4020   // Print totals for in-use data:
4021   out->cr();
4022   out->print_cr("Total Usage ( " UINTX_FORMAT " loader%s)%c",
4023       cl._num_loaders, (cl._num_loaders == 1 ? "" : "s"), (cl._num_loaders > 0 ? ':' : '.'));
4024 
4025   cl._stats_total.print_on(out, scale, print_by_chunktype);
4026 
4027   // -- Print Virtual space.
4028   out->cr();
4029   out->print_cr("Virtual space:");
4030 
4031   print_vs(out, scale);
4032 
4033   // -- Print VirtualSpaceList details.
4034   if ((flags & rf_show_vslist) > 0) {
4035     out->cr();
4036     out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
4037 
4038     if (Metaspace::using_class_space()) {
4039       out->print_cr("   Non-Class:");
4040     }
4041     Metaspace::space_list()->print_on(out, scale);
4042     if (Metaspace::using_class_space()) {
4043       out->print_cr("       Class:");
4044       Metaspace::class_space_list()->print_on(out, scale);
4045     }
4046   }
4047   out->cr();
4048 
4049   // -- Print VirtualSpaceList map.
4050   if ((flags & rf_show_vsmap) > 0) {
4051     out->cr();
4052     out->print_cr("Virtual space map:");
4053 
4054     if (Metaspace::using_class_space()) {
4055       out->print_cr("   Non-Class:");
4056     }
4057     Metaspace::space_list()->print_map(out);
4058     if (Metaspace::using_class_space()) {
4059       out->print_cr("       Class:");
4060       Metaspace::class_space_list()->print_map(out);
4061     }
4062   }
4063   out->cr();
4064 
4065   // -- Print Freelists (ChunkManager) details
4066   out->cr();
4067   out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
4068 
4069   ChunkManagerStatistics non_class_cm_stat;
4070   Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
4071 
4072   if (Metaspace::using_class_space()) {
4073     out->print_cr("   Non-Class:");
4074   }
4075   non_class_cm_stat.print_on(out, scale);
4076 
4077   if (Metaspace::using_class_space()) {
4078     ChunkManagerStatistics class_cm_stat;
4079     Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
4080     out->print_cr("       Class:");
4081     class_cm_stat.print_on(out, scale);
4082   }
4083 
4084   // As a convenience, print a summary of common waste.
4085   out->cr();
4086   out->print("Waste ");
4087   // All waste percentages below are relative to the total size of memory committed for metaspace.
4088   const size_t committed_words = committed_bytes() / BytesPerWord;
4089 
4090   out->print("(percentages refer to total committed size ");
4091   print_scaled_words(out, committed_words, scale);
4092   out->print_cr("):");
4093 
4094   // Print space committed but not yet used by any class loader
4095   const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
4096   out->print("              Committed unused: ");
4097   print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
4098   out->cr();
4099 
4100   // Print waste for in-use chunks.
4101   UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
4102   UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
4103   UsedChunksStatistics ucs_all;
4104   ucs_all.add(ucs_nonclass);
4105   ucs_all.add(ucs_class);
4106 
4107   out->print("        Waste in chunks in use: ");
4108   print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
4109   out->cr();
4110   out->print("         Free in chunks in use: ");
4111   print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
4112   out->cr();
4113   out->print("     Overhead in chunks in use: ");
4114   print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
4115   out->cr();
4116 
4117   // Print waste in free chunks.
4118   const size_t total_capacity_in_free_chunks =
4119       Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
4120      (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
4121   out->print("                In free chunks: ");
4122   print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
4123   out->cr();
4124 
4125   // Print waste in deallocated blocks.
4126   const uintx free_blocks_num =
4127       cl._stats_total.nonclass_sm_stats().free_blocks_num() +
4128       cl._stats_total.class_sm_stats().free_blocks_num();
4129   const size_t free_blocks_cap_words =
4130       cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
4131       cl._stats_total.class_sm_stats().free_blocks_cap_words();
4132   out->print("Deallocated from chunks in use: ");
4133   print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
4134   out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
4135   out->cr();
4136 
4137   // Print total waste.
4138   const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
4139       + free_blocks_cap_words + unused_words_in_vs;
4140   out->print("                       -total-: ");
4141   print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
4142   out->cr();
4143 
4144   // Print internal statistics
4145 #ifdef ASSERT
4146   out->cr();
4147   out->cr();
4148   out->print_cr("Internal statistics:");
4149   out->cr();
4150   out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
4151   out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
4152   out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
4153   out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
4154   out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
4155   out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
4156   out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
4157   out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
4158   out->cr();
4159 #endif
4160 
4161   // Print some interesting settings
4162   out->cr();
4163   out->cr();
4164   out->print("MaxMetaspaceSize: ");
4165   print_human_readable_size(out, MaxMetaspaceSize, scale);
4166   out->cr();
4167   out->print("InitialBootClassLoaderMetaspaceSize: ");
4168   print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
4169   out->cr();
4170 
4171   out->print("UseCompressedClassPointers: %s", UseCompressedClassPointers ? "true" : "false");
4172   out->cr();
4173   if (Metaspace::using_class_space()) {
4174     out->print("CompressedClassSpaceSize: ");
4175     print_human_readable_size(out, CompressedClassSpaceSize, scale);
4176   }
4177 
4178   out->cr();
4179   out->cr();
4180 
4181 } // MetaspaceUtils::print_report()
4182 
4183 // Prints an ASCII representation of the given space.
4184 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4185   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4186   const bool for_class = (mdtype == Metaspace::ClassType);
4187   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4188   if (vsl != NULL) {
4189     if (for_class) {
4190       if (!Metaspace::using_class_space()) {
4191         out->print_cr("No Class Space.");
4192         return;
4193       }
4194       out->print_raw("---- Metaspace Map (Class Space) ----");
4195     } else {
4196       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4197     }
4198     // Print legend:
4199     out->cr();
4200     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4201     out->cr();
4203     vsl->print_map(out);
4204     out->cr();
4205   }
4206 }
4207 
4208 void MetaspaceUtils::verify_free_chunks() {
4209   Metaspace::chunk_manager_metadata()->verify();
4210   if (Metaspace::using_class_space()) {
4211     Metaspace::chunk_manager_class()->verify();
4212   }
4213 }
4214 
4215 void MetaspaceUtils::verify_metrics() {
4216 #ifdef ASSERT
4217   // Please note: there are time windows where the internal counters are out of sync with
4218   // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
4219   // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
4220   // not be counted when iterating the CLDG. So be careful when you call this method.
4221   ClassLoaderMetaspaceStatistics total_stat;
4222   collect_statistics(&total_stat);
4223   UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
4224   UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
4225 
4226   bool mismatch = false;
4227   for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
4228     Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
4229     UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
4230     if (capacity_words(mdtype) != chunk_stat.cap() ||
4231         used_words(mdtype) != chunk_stat.used() ||
4232         overhead_words(mdtype) != chunk_stat.overhead()) {
4233       mismatch = true;
4234       tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
4235       tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4236                     capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
4237       tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
4238                     chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
4239       tty->flush();
4240     }
4241   }
4242   assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
4243 #endif
4244 }
4245 
4246 
4247 // Metaspace methods
4248 
4249 size_t Metaspace::_first_chunk_word_size = 0;
4250 size_t Metaspace::_first_class_chunk_word_size = 0;
4251 
4252 size_t Metaspace::_commit_alignment = 0;
4253 size_t Metaspace::_reserve_alignment = 0;
4254 
4255 VirtualSpaceList* Metaspace::_space_list = NULL;
4256 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4257 
4258 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4259 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4260 
4261 #define VIRTUALSPACEMULTIPLIER 2
4262 
4263 #ifdef _LP64
4264 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4265 
4266 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4267   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4268   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4269   // narrow_klass_base is the lower of the metaspace base and the cds base
4270   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4271   // between the lower base and higher address.
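  // For reference, a compressed klass pointer is decoded as
  //   Klass* k = (Klass*)(narrow_klass_base + ((uintptr_t)narrow_klass << narrow_klass_shift));
  // so the distance (higher_address - lower_base) must not exceed
  // UnscaledClassSpaceMax << narrow_klass_shift (4G unshifted, 32G with a shift of
  // LogKlassAlignmentInBytes).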
4272   address lower_base;
4273   address higher_address;
4274 #if INCLUDE_CDS
4275   if (UseSharedSpaces) {
4276     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4277                           (address)(metaspace_base + compressed_class_space_size()));
4278     lower_base = MIN2(metaspace_base, cds_base);
4279   } else
4280 #endif
4281   {
4282     higher_address = metaspace_base + compressed_class_space_size();
4283     lower_base = metaspace_base;
4284 
4285     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4286     // If compressed class space fits in lower 32G, we don't need a base.
4287     if (higher_address <= (address)klass_encoding_max) {
4288       lower_base = 0; // Effectively lower base is zero.
4289     }
4290   }
4291 
4292   Universe::set_narrow_klass_base(lower_base);
4293 
4294   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4295   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4296   // how the dump-time narrow_klass_shift is set. Although CDS could also
4297   // work with zero-shift mode, it uses LogKlassAlignmentInBytes for the
4298   // klass shift to stay consistent with AOT, so archived java heap objects
4299   // can be used at the same time as AOT code.
4300   if (!UseSharedSpaces
4301       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4302     Universe::set_narrow_klass_shift(0);
4303   } else {
4304     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4305   }
4306   AOTLoader::set_narrow_klass_shift();
4307 }
4308 
4309 #if INCLUDE_CDS
4310 // Return TRUE if the specified metaspace_base and cds_base are close enough
4311 // to work with compressed klass pointers.
4312 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4313   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4314   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4315   address lower_base = MIN2((address)metaspace_base, cds_base);
4316   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4317                                 (address)(metaspace_base + compressed_class_space_size()));
4318   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4319 }
4320 #endif
4321 
4322 // Try to allocate the metaspace at the requested addr.
4323 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4324   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4325   assert(using_class_space(), "called improperly");
4326   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4327   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4328          "Metaspace size is too big");
4329   assert_is_aligned(requested_addr, _reserve_alignment);
4330   assert_is_aligned(cds_base, _reserve_alignment);
4331   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4332 
4333   // Don't use large pages for the class space.
4334   bool large_pages = false;
4335 
4336 #if !(defined(AARCH64) || defined(AIX))
4337   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4338                                              _reserve_alignment,
4339                                              large_pages,
4340                                              requested_addr);
4341 #else // AARCH64 || AIX
4342   ReservedSpace metaspace_rs;
4343 
4344   // Our compressed klass pointers may fit nicely into the lower 32
4345   // bits.
4346   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4347     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4348                                  _reserve_alignment,
4349                                  large_pages,
4350                                  requested_addr);
4351   }
4352 
4353   if (! metaspace_rs.is_reserved()) {
4354     // Aarch64: Try to align metaspace so that we can decode a compressed
4355     // klass with a single MOVK instruction.  We can do this iff the
4356     // compressed class base is a multiple of 4G.
4357     // Aix: Search for a place where we can find memory. If we need to load
4358     // the base, 4G alignment is helpful, too.
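    // (With a 4G-aligned base below 2^48 the low 32 bits of the base are zero, so the
    // decode "base + (narrow_klass << shift)" reduces to inserting the upper 16 bits of
    // the base into the shifted narrow klass value with a single MOVK.)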
4359     size_t increment = AARCH64_ONLY(4*)G;
4360     for (char *a = align_up(requested_addr, increment);
4361          a < (char*)(1024*G);
4362          a += increment) {
4363       if (a == (char *)(32*G)) {
4364         // Go faster from here on. Zero-based is no longer possible.
4365         increment = 4*G;
4366       }
4367 
4368 #if INCLUDE_CDS
4369       if (UseSharedSpaces
4370           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4371         // We failed to find an aligned base that will reach.  Fall
4372         // back to using our requested addr.
4373         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4374                                      _reserve_alignment,
4375                                      large_pages,
4376                                      requested_addr);
4377         break;
4378       }
4379 #endif
4380 
4381       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4382                                    _reserve_alignment,
4383                                    large_pages,
4384                                    a);
4385       if (metaspace_rs.is_reserved())
4386         break;
4387     }
4388   }
4389 
4390 #endif // AARCH64 || AIX
4391 
4392   if (!metaspace_rs.is_reserved()) {
4393 #if INCLUDE_CDS
4394     if (UseSharedSpaces) {
4395       size_t increment = align_up(1*G, _reserve_alignment);
4396 
4397       // Keep trying to allocate the metaspace, increasing the requested_addr
4398       // by 1GB each time, until we reach an address that will no longer allow
4399       // use of CDS with compressed klass pointers.
4400       char *addr = requested_addr;
4401       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4402              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4403         addr = addr + increment;
4404         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4405                                      _reserve_alignment, large_pages, addr);
4406       }
4407     }
4408 #endif
4409     // If no allocation has succeeded so far, try to allocate the space anywhere. If
4410     // that also fails, report an out-of-memory error. At this point we cannot try allocating the
4411     // metaspace as if UseCompressedClassPointers is off because too much
4412     // initialization has happened that depends on UseCompressedClassPointers.
4413     // So, UseCompressedClassPointers cannot be turned off at this point.
4414     if (!metaspace_rs.is_reserved()) {
4415       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4416                                    _reserve_alignment, large_pages);
4417       if (!metaspace_rs.is_reserved()) {
4418         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4419                                               compressed_class_space_size()));
4420       }
4421     }
4422   }
4423 
4424   // If we got here then the metaspace got allocated.
4425   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4426 
4427 #if INCLUDE_CDS
4428   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4429   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4430     FileMapInfo::stop_sharing_and_unmap(
4431         "Could not allocate metaspace at a compatible address");
4432   }
4433 #endif
4434   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4435                                   UseSharedSpaces ? (address)cds_base : 0);
4436 
4437   initialize_class_space(metaspace_rs);
4438 
4439   LogTarget(Trace, gc, metaspace) lt;
4440   if (lt.is_enabled()) {
4441     ResourceMark rm;
4442     LogStream ls(lt);
4443     print_compressed_class_space(&ls, requested_addr);
4444     ls.cr(); // ~LogStream does not autoflush.
4445   }
4446 }
4447 
4448 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4449   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4450                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4451   if (_class_space_list != NULL) {
4452     address base = (address)_class_space_list->current_virtual_space()->bottom();
4453     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4454                  compressed_class_space_size(), p2i(base));
4455     if (requested_addr != 0) {
4456       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4457     }
4458     st->cr();
4459   }
4460 }
4461 
4462 // For UseCompressedClassPointers the class space is reserved above the top of
4463 // the Java heap.  The argument passed in is at the base of the compressed space.
4464 void Metaspace::initialize_class_space(ReservedSpace rs) {
4465   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4466   assert(rs.size() >= CompressedClassSpaceSize,
4467          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4468   assert(using_class_space(), "Must be using class space");
4469   _class_space_list = new VirtualSpaceList(rs);
4470   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4471 
4472   if (!_class_space_list->initialization_succeeded()) {
4473     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4474   }
4475 }
4476 
4477 #endif
4478 
4479 void Metaspace::ergo_initialize() {
4480   if (DumpSharedSpaces) {
4481     // Using large pages when dumping the shared archive is currently not implemented.
4482     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4483   }
4484 
4485   size_t page_size = os::vm_page_size();
4486   if (UseLargePages && UseLargePagesInMetaspace) {
4487     page_size = os::large_page_size();
4488   }
4489 
4490   _commit_alignment  = page_size;
4491   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4492 
4493   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
4494   // override if MaxMetaspaceSize was set on the command line or not.
4495   // This information is needed later to conform to the specification of the
4496   // java.lang.management.MemoryUsage API.
4497   //
4498   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4499   // globals.hpp to the aligned value, but this is not possible, since the
4500   // alignment depends on other flags being parsed.
4501   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4502 
4503   if (MetaspaceSize > MaxMetaspaceSize) {
4504     MetaspaceSize = MaxMetaspaceSize;
4505   }
4506 
4507   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4508 
4509   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4510 
4511   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4512   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4513 
4514   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4515 
4516   // Initial virtual space size will be calculated at global_initialize()
4517   size_t min_metaspace_sz =
4518       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4519   if (UseCompressedClassPointers) {
4520     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4521       if (min_metaspace_sz >= MaxMetaspaceSize) {
4522         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4523       } else {
4524         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4525                       MaxMetaspaceSize - min_metaspace_sz);
4526       }
4527     }
4528   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4529     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4530                   min_metaspace_sz);
4531   }
4532 
4533   set_compressed_class_space_size(CompressedClassSpaceSize);
4534 }
4535 
4536 void Metaspace::global_initialize() {
4537   MetaspaceGC::initialize();
4538 
4539 #if INCLUDE_CDS
4540   if (DumpSharedSpaces) {
4541     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4542   } else if (UseSharedSpaces) {
4543     // If any of the archived spaces fails to map, UseSharedSpaces
4544     // is reset to false. Fall through to the
4545     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4546     // metaspace.
4547     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4548   }
4549 
4550   if (!DumpSharedSpaces && !UseSharedSpaces)
4551 #endif // INCLUDE_CDS
4552   {
4553 #ifdef _LP64
4554     if (using_class_space()) {
4555       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4556       allocate_metaspace_compressed_klass_ptrs(base, 0);
4557     }
4558 #endif // _LP64
4559   }
4560 
4561   // Initialize these before initializing the VirtualSpaceList
4562   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4563   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4564   // Make the first class chunk bigger than a medium chunk so it's not put
4565   // on the medium chunk list. The next chunk will be small and progress
4566   // from there. This size was determined empirically by running -version.
4567   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4568                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4569   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4570   // Arbitrarily set the initial virtual space to a multiple
4571   // of the boot class loader size.
4572   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4573   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4574 
4575   // Initialize the list of virtual spaces.
4576   _space_list = new VirtualSpaceList(word_size);
4577   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4578 
4579   if (!_space_list->initialization_succeeded()) {
4580     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4581   }
4582 
4583   _tracer = new MetaspaceTracer();
4584 }
4585 
4586 void Metaspace::post_initialize() {
4587   MetaspaceGC::post_initialize();
4588 }
4589 
4590 void Metaspace::verify_global_initialization() {
4591   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4592   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4593 
4594   if (using_class_space()) {
4595     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4596     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4597   }
4598 }
4599 
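// Round a metaspace word size up so that the corresponding byte size is aligned to
// the platform allocation granularity (see ReservedSpace::allocation_align_size_up()).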
4600 size_t Metaspace::align_word_size_up(size_t word_size) {
4601   size_t byte_size = word_size * wordSize;
4602   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4603 }
4604 
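// Central allocation entry point for metadata. Allocates word_size words from the
// loader's ClassLoaderMetaspace; if that fails, a GC may be induced to reclaim
// metadata and the allocation is retried. A persistent failure is reported as an
// OutOfMemoryError (or aborts the VM during CDS dumping).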
4605 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4606                               MetaspaceObj::Type type, TRAPS) {
4607   assert(!_frozen, "sanity");
4608   if (HAS_PENDING_EXCEPTION) {
4609     assert(false, "Should not allocate with exception pending");
4610     return NULL;  // caller does a CHECK_NULL too
4611   }
4612 
4613   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4614         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4615 
4616   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4617 
4618   // Try to allocate metadata.
4619   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4620 
4621   if (result == NULL) {
4622     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4623 
4624     // Allocation failed.
4625     if (is_init_completed() && !(DumpSharedSpaces && THREAD->is_VM_thread())) {
4626       // Only start a GC if the bootstrapping has completed.
4627       // Also, we cannot GC if we are at the end of the CDS dumping stage which runs inside
4628       // the VM thread.
4629 
4630       // Try to clean out some memory and retry.
4631       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4632     }
4633   }
4634 
4635   if (result == NULL) {
4636     if (DumpSharedSpaces) {
4637       // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
4638       // We should abort to avoid generating a potentially bad archive.
4639       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4640           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4641       tty->print_cr("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize);
4642       vm_exit(1);
4643     }
4644     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4645   }
4646 
4647   // Zero initialize.
4648   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4649 
4650   return result;
4651 }
4652 
4653 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4654   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4655 
4656   // If result is still null, we are out of memory.
4657   Log(gc, metaspace, freelist) log;
4658   if (log.is_info()) {
4659     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4660              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4661     ResourceMark rm;
4662     if (log.is_debug()) {
4663       if (loader_data->metaspace_or_null() != NULL) {
4664         LogStream ls(log.debug());
4665         loader_data->print_value_on(&ls);
4666         ls.cr(); // ~LogStream does not autoflush.
4667       }
4668     }
4669     LogStream ls(log.info());
4670     // In case of an OOM, log out a short but still useful report.
4671     MetaspaceUtils::print_basic_report(&ls, 0);
4672     ls.cr(); // ~LogStream does not autoflush.
4673   }
4674 
4675   bool out_of_compressed_class_space = false;
4676   if (is_class_space_allocation(mdtype)) {
4677     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4678     out_of_compressed_class_space =
4679       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4680       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4681       CompressedClassSpaceSize;
4682   }
4683 
4684   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4685   const char* space_string = out_of_compressed_class_space ?
4686     "Compressed class space" : "Metaspace";
4687 
4688   report_java_out_of_memory(space_string);
4689 
4690   if (JvmtiExport::should_post_resource_exhausted()) {
4691     JvmtiExport::post_resource_exhausted(
4692         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4693         space_string);
4694   }
4695 
4696   if (!is_init_completed()) {
4697     vm_exit_during_initialization("OutOfMemoryError", space_string);
4698   }
4699 
4700   if (out_of_compressed_class_space) {
4701     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4702   } else {
4703     THROW_OOP(Universe::out_of_memory_error_metaspace());
4704   }
4705 }
4706 
4707 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4708   switch (mdtype) {
4709     case Metaspace::ClassType: return "Class";
4710     case Metaspace::NonClassType: return "Metadata";
4711     default:
4712       assert(false, "Got bad mdtype: %d", (int) mdtype);
4713       return NULL;
4714   }
4715 }
4716 
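// Remove completely empty virtual space nodes of the given metadata type, taking
// their free chunks off the chunk manager's free lists and unmapping the memory.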
4717 void Metaspace::purge(MetadataType mdtype) {
4718   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4719 }
4720 
4721 void Metaspace::purge() {
4722   MutexLockerEx cl(MetaspaceExpand_lock,
4723                    Mutex::_no_safepoint_check_flag);
4724   purge(NonClassType);
4725   if (using_class_space()) {
4726     purge(ClassType);
4727   }
4728 }
4729 
4730 bool Metaspace::contains(const void* ptr) {
4731   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4732     return true;
4733   }
4734   return contains_non_shared(ptr);
4735 }
4736 
4737 bool Metaspace::contains_non_shared(const void* ptr) {
4738   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4739      return true;
4740   }
4741 
4742   return get_space_list(NonClassType)->contains(ptr);
4743 }
4744 
4745 // ClassLoaderMetaspace
4746 
4747 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
4748   : _lock(lock)
4749   , _space_type(type)
4750   , _vsm(NULL)
4751   , _class_vsm(NULL)
4752 {
4753   initialize(lock, type);
4754 }
4755 
4756 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4757   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
4758   delete _vsm;
4759   if (Metaspace::using_class_space()) {
4760     delete _class_vsm;
4761   }
4762 }
4763 
4764 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4765   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4766   if (chunk != NULL) {
4767     // Add to this manager's list of chunks in use and make it the current_chunk().
4768     get_space_manager(mdtype)->add_chunk(chunk, true);
4769   }
4770 }
4771 
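// Get the first chunk for a newly created ClassLoaderMetaspace: try the chunk
// manager's free lists first, and fall back to carving a new chunk out of the
// virtual space list.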
4772 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4773   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4774 
4775   // Get a chunk from the chunk freelist
4776   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4777 
4778   if (chunk == NULL) {
4779     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4780                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4781   }
4782 
4783   return chunk;
4784 }
4785 
4786 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4787   Metaspace::verify_global_initialization();
4788 
4789   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
4790 
4791   // Allocate SpaceManager for metadata objects.
4792   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4793 
4794   if (Metaspace::using_class_space()) {
4795     // Allocate SpaceManager for classes.
4796     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4797   }
4798 
4799   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4800 
4801   // Allocate chunk for metadata objects
4802   initialize_first_chunk(type, Metaspace::NonClassType);
4803 
4804   // Allocate chunk for class metadata objects
4805   if (Metaspace::using_class_space()) {
4806     initialize_first_chunk(type, Metaspace::ClassType);
4807   }
4808 }
4809 
4810 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4811   Metaspace::assert_not_frozen();
4812 
4813   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
4814 
4815   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4816   if (Metaspace::is_class_space_allocation(mdtype)) {
4817     return  class_vsm()->allocate(word_size);
4818   } else {
4819     return  vsm()->allocate(word_size);
4820   }
4821 }
4822 
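// Attempt to raise the metaspace high-water mark (capacity_until_GC) by enough to
// satisfy this allocation, then attempt the allocation (again).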
4823 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4824   Metaspace::assert_not_frozen();
4825   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4826   assert(delta_bytes > 0, "Must be");
4827 
4828   size_t before = 0;
4829   size_t after = 0;
4830   MetaWord* res;
4831   bool incremented;
4832 
4833   // Each thread increments the HWM at most once. Even if the thread fails to increment
4834   // the HWM, an allocation is still attempted. This is because another thread must then
4835   // have incremented the HWM and therefore the allocation might still succeed.
4836   do {
4837     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4838     res = allocate(word_size, mdtype);
4839   } while (!incremented && res == NULL);
4840 
4841   if (incremented) {
4842     Metaspace::tracer()->report_gc_threshold(before, after,
4843                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4844     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4845   }
4846 
4847   return res;
4848 }
4849 
4850 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4851   return (vsm()->used_words() +
4852       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
4853 }
4854 
4855 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4856   return (vsm()->capacity_words() +
4857       (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
4858 }
4859 
4860 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4861   Metaspace::assert_not_frozen();
4862   assert(!SafepointSynchronize::is_at_safepoint()
4863          || Thread::current()->is_VM_thread(), "should be the VM thread");
4864 
4865   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
4866 
4867   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4868 
4869   if (is_class && Metaspace::using_class_space()) {
4870     class_vsm()->deallocate(ptr, word_size);
4871   } else {
4872     vsm()->deallocate(ptr, word_size);
4873   }
4874 }
4875 
4876 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4877   assert(Metaspace::using_class_space(), "Has to use class space");
4878   return class_vsm()->calc_chunk_size(word_size);
4879 }
4880 
4881 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4882   // Print both class virtual space counts and metaspace.
4883   if (Verbose) {
4884     vsm()->print_on(out);
4885     if (Metaspace::using_class_space()) {
4886       class_vsm()->print_on(out);
4887     }
4888   }
4889 }
4890 
4891 void ClassLoaderMetaspace::verify() {
4892   vsm()->verify();
4893   if (Metaspace::using_class_space()) {
4894     class_vsm()->verify();
4895   }
4896 }
4897 
4898 void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
4899   assert_lock_strong(lock());
4900   vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
4901   if (Metaspace::using_class_space()) {
4902     class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
4903   }
4904 }
4905 
4906 void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
4907   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
4908   add_to_statistics_locked(out);
4909 }
4910 
4911 #ifdef ASSERT
4912 static void do_verify_chunk(Metachunk* chunk) {
4913   guarantee(chunk != NULL, "Sanity");
4914   // Verify the chunk itself; then verify that it is consistent with the
4915   // occupancy map of its containing node.
4916   chunk->verify();
4917   VirtualSpaceNode* const vsn = chunk->container();
4918   OccupancyMap* const ocmap = vsn->occupancy_map();
4919   ocmap->verify_for_chunk(chunk);
4920 }
4921 #endif
4922 
4923 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4924   chunk->set_is_tagged_free(!inuse);
4925   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4926   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4927 }
4928 
4929 /////////////// Unit tests ///////////////
4930 
4931 #ifndef PRODUCT
4932 
4933 class TestMetaspaceUtilsTest : AllStatic {
4934  public:
4935   static void test_reserved() {
4936     size_t reserved = MetaspaceUtils::reserved_bytes();
4937 
4938     assert(reserved > 0, "assert");
4939 
4940     size_t committed  = MetaspaceUtils::committed_bytes();
4941     assert(committed <= reserved, "assert");
4942 
4943     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
4944     assert(reserved_metadata > 0, "assert");
4945     assert(reserved_metadata <= reserved, "assert");
4946 
4947     if (UseCompressedClassPointers) {
4948       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
4949       assert(reserved_class > 0, "assert");
4950       assert(reserved_class < reserved, "assert");
4951     }
4952   }
4953 
4954   static void test_committed() {
4955     size_t committed = MetaspaceUtils::committed_bytes();
4956 
4957     assert(committed > 0, "assert");
4958 
4959     size_t reserved  = MetaspaceUtils::reserved_bytes();
4960     assert(committed <= reserved, "assert");
4961 
4962     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
4963     assert(committed_metadata > 0, "assert");
4964     assert(committed_metadata <= committed, "assert");
4965 
4966     if (UseCompressedClassPointers) {
4967       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
4968       assert(committed_class > 0, "assert");
4969       assert(committed_class < committed, "assert");
4970     }
4971   }
4972 
4973   static void test_virtual_space_list_large_chunk() {
4974     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
4975     MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
4976     // Use a size larger than VirtualSpaceSize (256k) and add one page so the result is
4977     // _not_ vm_allocation_granularity aligned on Windows.
4978     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
4979     large_size += (os::vm_page_size()/BytesPerWord);
4980     vs_list->get_new_chunk(large_size, 0);
4981   }
4982 
4983   static void test() {
4984     test_reserved();
4985     test_committed();
4986     test_virtual_space_list_large_chunk();
4987   }
4988 };
4989 
4990 void TestMetaspaceUtils_test() {
4991   TestMetaspaceUtilsTest::test();
4992 }
4993 
4994 class TestVirtualSpaceNodeTest {
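  // Split words_left into the number of medium, small and specialized chunks it
  // decomposes into, largest chunk sizes first.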
4995   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
4996                                           size_t& num_small_chunks,
4997                                           size_t& num_specialized_chunks) {
4998     num_medium_chunks = words_left / MediumChunk;
4999     words_left = words_left % MediumChunk;
5000 
5001     num_small_chunks = words_left / SmallChunk;
5002     words_left = words_left % SmallChunk;
5003     // how many specialized chunks can we get?
5004     num_specialized_chunks = words_left / SpecializedChunk;
5005     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5006   }
5007 
5008  public:
5009   static void test() {
5010     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
5011     const size_t vsn_test_size_words = MediumChunk  * 4;
5012     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5013 
5014     // The chunk sizes must be multiples of each other, or this will fail
5015     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5016     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5017 
5018     { // No committed memory in VSN
5019       ChunkManager cm(false);
5020       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5021       vsn.initialize();
5022       vsn.retire(&cm);
5023       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5024     }
5025 
5026     { // All of VSN is committed, half is used by chunks
5027       ChunkManager cm(false);
5028       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5029       vsn.initialize();
5030       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5031       vsn.get_chunk_vs(MediumChunk);
5032       vsn.get_chunk_vs(MediumChunk);
5033       vsn.retire(&cm);
5034       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5035       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5036     }
5037 
5038     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5039     // This doesn't work for systems with vm_page_size >= 16K.
5040     if (page_chunks < MediumChunk) {
5041       // 4 pages of VSN is committed, some is used by chunks
5042       ChunkManager cm(false);
5043       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5044 
5045       vsn.initialize();
5046       vsn.expand_by(page_chunks, page_chunks);
5047       vsn.get_chunk_vs(SmallChunk);
5048       vsn.get_chunk_vs(SpecializedChunk);
5049       vsn.retire(&cm);
5050 
5051       // committed - used = words left to retire
5052       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5053 
5054       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5055       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5056 
5057       assert(num_medium_chunks == 0, "should not get any medium chunks");
5058       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5059       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5060     }
5061 
5062     { // Half of VSN is committed, a humongous chunk is used
5063       ChunkManager cm(false);
5064       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5065       vsn.initialize();
5066       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5067       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5068       vsn.retire(&cm);
5069 
5070       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5071       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5072       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5073 
5074       assert(num_medium_chunks == 0, "should not get any medium chunks");
5075       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5076       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5077     }
5078 
5079   }
5080 
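// Helper macros that assert whether VirtualSpaceNode::is_available() accepts or
// rejects a given word size, with a descriptive failure message.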
5081 #define assert_is_available_positive(word_size) \
5082   assert(vsn.is_available(word_size), \
5083          #word_size ": " PTR_FORMAT " bytes were not available in " \
5084          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5085          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5086 
5087 #define assert_is_available_negative(word_size) \
5088   assert(!vsn.is_available(word_size), \
5089          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5090          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5091          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5092 
5093   static void test_is_available_positive() {
5094     // Reserve some memory.
5095     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5096     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5097 
5098     // Commit some memory.
5099     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5100     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5101     assert(expanded, "Failed to commit");
5102 
5103     // Check that is_available accepts the committed size.
5104     assert_is_available_positive(commit_word_size);
5105 
5106     // Check that is_available accepts half the committed size.
5107     size_t expand_word_size = commit_word_size / 2;
5108     assert_is_available_positive(expand_word_size);
5109   }
5110 
5111   static void test_is_available_negative() {
5112     // Reserve some memory.
5113     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5114     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5115 
5116     // Commit some memory.
5117     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5118     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5119     assert(expanded, "Failed to commit");
5120 
5121     // Check that is_available doesn't accept a too large size.
5122     size_t two_times_commit_word_size = commit_word_size * 2;
5123     assert_is_available_negative(two_times_commit_word_size);
5124   }
5125 
5126   static void test_is_available_overflow() {
5127     // Reserve some memory.
5128     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5129     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5130 
5131     // Commit some memory.
5132     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5133     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5134     assert(expanded, "Failed to commit");
5135 
5136     // Calculate a size that will overflow the virtual space size.
5137     void* virtual_space_max = (void*)(uintptr_t)-1;
5138     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5139     size_t overflow_size = bottom_to_max + BytesPerWord;
5140     size_t overflow_word_size = overflow_size / BytesPerWord;
5141 
5142     // Check that is_available can handle the overflow.
5143     assert_is_available_negative(overflow_word_size);
5144   }
5145 
5146   static void test_is_available() {
5147     TestVirtualSpaceNodeTest::test_is_available_positive();
5148     TestVirtualSpaceNodeTest::test_is_available_negative();
5149     TestVirtualSpaceNodeTest::test_is_available_overflow();
5150   }
5151 };
5152 
5153 // The following test is placed here instead of a gtest / unittest file
5154 // because the ChunkManager class is only available in this file.
5155 void ChunkManager_test_list_index() {
5156   {
5157     // Test previous bug where a query for a humongous class metachunk,
5158     // incorrectly matched the non-class medium metachunk size.
5159     {
5160       ChunkManager manager(true);
5161 
5162       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5163 
5164       ChunkIndex index = manager.list_index(MediumChunk);
5165 
5166       assert(index == HumongousIndex,
5167           "Requested size is larger than ClassMediumChunk,"
5168           " so should return HumongousIndex. Got index: %d", (int)index);
5169     }
5170 
5171     // Check the specified sizes as well.
5172     {
5173       ChunkManager manager(true);
5174       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5175       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5176       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5177       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5178     }
5179     {
5180       ChunkManager manager(false);
5181       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5182       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5183       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5184       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5185     }
5186 
5187   }
5188 
5189 }
5190 
5191 #endif // !PRODUCT
5192 
5193 #ifdef ASSERT
5194 
5195 // The following test is placed here instead of a gtest / unittest file
5196 // because the ChunkManager class is only available in this file.
5197 class SpaceManagerTest : AllStatic {
5198   friend void SpaceManager_test_adjust_initial_chunk_size();
5199 
5200   static void test_adjust_initial_chunk_size(bool is_class) {
5201     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5202     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5203     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5204 
5205 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5206     do {                                                                         \
5207       size_t v = value;                                                          \
5208       size_t e = expected;                                                       \
5209       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5210              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5211     } while (0)
5212 
5213     // Smallest (specialized)
5214     test_adjust_initial_chunk_size(1,            smallest, is_class);
5215     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5216     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5217 
5218     // Small
5219     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5220     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5221     test_adjust_initial_chunk_size(normal,       normal, is_class);
5222 
5223     // Medium
5224     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5225     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5226     test_adjust_initial_chunk_size(medium,     medium, is_class);
5227 
5228     // Humongous
5229     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5230 
5231 #undef test_adjust_initial_chunk_size
5232   }
5233 
5234   static void test_adjust_initial_chunk_size() {
5235     test_adjust_initial_chunk_size(false);
5236     test_adjust_initial_chunk_size(true);
5237   }
5238 };
5239 
5240 void SpaceManager_test_adjust_initial_chunk_size() {
5241   SpaceManagerTest::test_adjust_initial_chunk_size();
5242 }
5243 
5244 #endif // ASSERT
5245 
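// The structs and extern "test_metaspace_*" functions below expose chunk manager
// statistics and chunk geometry to external tests (e.g. gtest-based tests), which
// cannot see the classes defined in this file.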
5246 struct chunkmanager_statistics_t {
5247   int num_specialized_chunks;
5248   int num_small_chunks;
5249   int num_medium_chunks;
5250   int num_humongous_chunks;
5251 };
5252 
5253 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5254   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5255   ChunkManagerStatistics stat;
5256   chunk_manager->collect_statistics(&stat);
5257   out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
5258   out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
5259   out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
5260   out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
5261 }
5262 
5263 struct chunk_geometry_t {
5264   size_t specialized_chunk_word_size;
5265   size_t small_chunk_word_size;
5266   size_t medium_chunk_word_size;
5267 };
5268 
5269 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5270   if (mdType == Metaspace::NonClassType) {
5271     out->specialized_chunk_word_size = SpecializedChunk;
5272     out->small_chunk_word_size = SmallChunk;
5273     out->medium_chunk_word_size = MediumChunk;
5274   } else {
5275     out->specialized_chunk_word_size = ClassSpecializedChunk;
5276     out->small_chunk_word_size = ClassSmallChunk;
5277     out->medium_chunk_word_size = ClassMediumChunk;
5278   }
5279 }