1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Helper function that does a bunch of checks for a chunk.
  59 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  60 
  61 // Given a Metachunk, update its in-use information (both in the
  62 // chunk and the occupancy map).
  63 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  64 
  65 size_t const allocation_from_dictionary_limit = 4 * K;
  66 
  67 MetaWord* last_allocated = 0;
  68 
  69 size_t Metaspace::_compressed_class_space_size;
  70 const MetaspaceTracer* Metaspace::_tracer = NULL;
  71 
  72 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  73 
  74 enum ChunkSizes {    // in words.
  75   ClassSpecializedChunk = 128,
  76   SpecializedChunk = 128,
  77   ClassSmallChunk = 256,
  78   SmallChunk = 512,
  79   ClassMediumChunk = 4 * K,
  80   MediumChunk = 8 * K
  81 };
  82 
   83 // Returns the size, in words, of a non-humongous chunk of the given type.
  84 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  85   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  86   size_t size = 0;
  87   if (is_class) {
  88     switch(chunktype) {
  89       case SpecializedIndex: size = ClassSpecializedChunk; break;
  90       case SmallIndex: size = ClassSmallChunk; break;
  91       case MediumIndex: size = ClassMediumChunk; break;
  92       default:
  93         ShouldNotReachHere();
  94     }
  95   } else {
  96     switch(chunktype) {
  97       case SpecializedIndex: size = SpecializedChunk; break;
  98       case SmallIndex: size = SmallChunk; break;
  99       case MediumIndex: size = MediumChunk; break;
 100       default:
 101         ShouldNotReachHere();
 102     }
 103   }
 104   return size;
 105 }
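
// For example, with the ChunkSizes values above (all in words), a small chunk
// in the non-class metaspace is SmallChunk == 512 words, while a small chunk
// in the class space is ClassSmallChunk == 256 words:
//
//   get_size_for_nonhumongous_chunktype(SmallIndex, /*is_class*/ false) == 512
//   get_size_for_nonhumongous_chunktype(SmallIndex, /*is_class*/ true)  == 256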
 106 
 107 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 108   if (is_class) {
 109     if (size == ClassSpecializedChunk) {
 110       return SpecializedIndex;
 111     } else if (size == ClassSmallChunk) {
 112       return SmallIndex;
 113     } else if (size == ClassMediumChunk) {
 114       return MediumIndex;
 115     } else if (size > ClassMediumChunk) {
 116       // A valid humongous chunk size is a multiple of the smallest chunk size.
 117       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 118       return HumongousIndex;
 119     }
 120   } else {
 121     if (size == SpecializedChunk) {
 122       return SpecializedIndex;
 123     } else if (size == SmallChunk) {
 124       return SmallIndex;
 125     } else if (size == MediumChunk) {
 126       return MediumIndex;
 127     } else if (size > MediumChunk) {
 128       // A valid humongous chunk size is a multiple of the smallest chunk size.
 129       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 130       return HumongousIndex;
 131     }
 132   }
 133   ShouldNotReachHere();
 134   return (ChunkIndex)-1;
 135 }
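
// Sizes above the medium chunk size map to HumongousIndex. For example, in the
// non-class case a chunk of 3 * MediumChunk == 24 * K words is humongous; it is
// a multiple of SpecializedChunk == 128 words, so the alignment assert holds.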
 136 
 137 
 138 static ChunkIndex next_chunk_index(ChunkIndex i) {
 139   assert(i < NumberOfInUseLists, "Out of bound");
 140   return (ChunkIndex) (i+1);
 141 }
 142 
 143 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 144   assert(i > ZeroIndex, "Out of bound");
 145   return (ChunkIndex) (i-1);
 146 }
 147 
 148 static const char* scale_unit(size_t scale) {
 149   switch(scale) {
 150     case 1: return "BYTES";
 151     case K: return "KB";
 152     case M: return "MB";
 153     case G: return "GB";
 154     default:
 155       ShouldNotReachHere();
 156       return NULL;
 157   }
 158 }
 159 
 160 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 161 uint MetaspaceGC::_shrink_factor = 0;
 162 bool MetaspaceGC::_should_concurrent_collect = false;
 163 
 164 typedef class FreeList<Metachunk> ChunkList;
 165 
 166 // Manages the global free lists of chunks.
 167 class ChunkManager : public CHeapObj<mtInternal> {
 168   friend class TestVirtualSpaceNodeTest;
 169 
 170   // Free list of chunks of different sizes.
 171   //   SpecializedChunk
 172   //   SmallChunk
 173   //   MediumChunk
 174   ChunkList _free_chunks[NumberOfFreeLists];
 175 
 176   // Whether or not this is the class chunkmanager.
 177   const bool _is_class;
 178 
 179   // Return non-humongous chunk list by its index.
 180   ChunkList* free_chunks(ChunkIndex index);
 181 
 182   // Returns non-humongous chunk list for the given chunk word size.
 183   ChunkList* find_free_chunks_list(size_t word_size);
 184 
 185   //   HumongousChunk
 186   ChunkTreeDictionary _humongous_dictionary;
 187 
 188   // Returns the humongous chunk dictionary.
 189   ChunkTreeDictionary* humongous_dictionary() {
 190     return &_humongous_dictionary;
 191   }
 192 
  193   // Total size, in metaspace words, of all free chunks managed by this ChunkManager
 194   size_t _free_chunks_total;
  195   // Number of free chunks in this ChunkManager
 196   size_t _free_chunks_count;
 197 
  198   // Update counters after a chunk was added or removed.
 199   void account_for_added_chunk(const Metachunk* c);
 200   void account_for_removed_chunk(const Metachunk* c);
 201 
 202   // Debug support
 203 
 204   size_t sum_free_chunks();
 205   size_t sum_free_chunks_count();
 206 
 207   void locked_verify_free_chunks_total();
 208   void slow_locked_verify_free_chunks_total() {
 209     if (VerifyMetaspace) {
 210       locked_verify_free_chunks_total();
 211     }
 212   }
 213   void locked_verify_free_chunks_count();
 214   void slow_locked_verify_free_chunks_count() {
 215     if (VerifyMetaspace) {
 216       locked_verify_free_chunks_count();
 217     }
 218   }
 219   void verify_free_chunks_count();
 220 
 221   // Given a pointer to a chunk, attempts to merge it with neighboring
 222   // free chunks to form a bigger chunk. Returns true if successful.
 223   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 224 
 225   // Helper for chunk merging:
 226   //  Given an address range with 1-n chunks which are all supposed to be
 227   //  free and hence currently managed by this ChunkManager, remove them
 228   //  from this ChunkManager and mark them as invalid.
 229   // - This does not correct the occupancy map.
 230   // - This does not adjust the counters in ChunkManager.
  231   // - This does not adjust the container count in the containing VirtualSpaceNode.
 232   // Returns number of chunks removed.
 233   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 234 
 235   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 236   // split up the larger chunk into n smaller chunks, at least one of which should be
 237   // the target chunk of target chunk size. The smaller chunks, including the target
 238   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 239   // Note that this chunk is supposed to be removed from the freelist right away.
 240   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 241 
 242  public:
 243 
 244   struct ChunkManagerStatistics {
 245     size_t num_by_type[NumberOfFreeLists];
 246     size_t single_size_by_type[NumberOfFreeLists];
 247     size_t total_size_by_type[NumberOfFreeLists];
 248     size_t num_humongous_chunks;
 249     size_t total_size_humongous_chunks;
 250   };
 251 
 252   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 253   void get_statistics(ChunkManagerStatistics* stat) const;
 254   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 255 
 256 
 257   ChunkManager(bool is_class)
 258       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 259     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 260     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 261     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 262   }
 263 
  264   // Allocate (remove and return) a chunk from the global freelist.
 265   Metachunk* chunk_freelist_allocate(size_t word_size);
 266 
 267   // Map a size to a list index assuming that there are lists
 268   // for special, small, medium, and humongous chunks.
 269   ChunkIndex list_index(size_t size);
 270 
 271   // Map a given index to the chunk size.
 272   size_t size_by_index(ChunkIndex index) const;
 273 
 274   bool is_class() const { return _is_class; }
 275 
 276   // Convenience accessors.
 277   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 278   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 279   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 280 
 281   // Take a chunk from the ChunkManager. The chunk is expected to be in
 282   // the chunk manager (the freelist if non-humongous, the dictionary if
 283   // humongous).
 284   void remove_chunk(Metachunk* chunk);
 285 
 286   // Return a single chunk of type index to the ChunkManager.
 287   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 288 
 289   // Add the simple linked list of chunks to the freelist of chunks
 290   // of type index.
 291   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 292 
 293   // Total of the space in the free chunks list
 294   size_t free_chunks_total_words();
 295   size_t free_chunks_total_bytes();
 296 
 297   // Number of chunks in the free chunks list
 298   size_t free_chunks_count();
 299 
 300   // Remove from a list by size.  Selects list based on size of chunk.
 301   Metachunk* free_chunks_get(size_t chunk_word_size);
 302 
 303 #define index_bounds_check(index)                                         \
 304   assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
 305 
 306   size_t num_free_chunks(ChunkIndex index) const {
 307     index_bounds_check(index);
 308 
 309     if (index == HumongousIndex) {
 310       return _humongous_dictionary.total_free_blocks();
 311     }
 312 
 313     ssize_t count = _free_chunks[index].count();
 314     return count == -1 ? 0 : (size_t) count;
 315   }
 316 
 317   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 318     index_bounds_check(index);
 319 
 320     size_t word_size = 0;
 321     if (index == HumongousIndex) {
 322       word_size = _humongous_dictionary.total_size();
 323     } else {
 324       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 325       word_size = size_per_chunk_in_words * num_free_chunks(index);
 326     }
 327 
 328     return word_size * BytesPerWord;
 329   }
 330 
 331   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 332     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 333                                          num_free_chunks(SmallIndex),
 334                                          num_free_chunks(MediumIndex),
 335                                          num_free_chunks(HumongousIndex),
 336                                          size_free_chunks_in_bytes(SpecializedIndex),
 337                                          size_free_chunks_in_bytes(SmallIndex),
 338                                          size_free_chunks_in_bytes(MediumIndex),
 339                                          size_free_chunks_in_bytes(HumongousIndex));
 340   }
 341 
 342   // Debug support
 343   void verify();
 344   void slow_verify() {
 345     if (VerifyMetaspace) {
 346       verify();
 347     }
 348   }
 349   void locked_verify();
 350   void slow_locked_verify() {
 351     if (VerifyMetaspace) {
 352       locked_verify();
 353     }
 354   }
 355   void verify_free_chunks_total();
 356 
 357   void locked_print_free_chunks(outputStream* st);
 358   void locked_print_sum_free_chunks(outputStream* st);
 359 
 360   void print_on(outputStream* st) const;
 361 
 362   // Prints composition for both non-class and (if available)
 363   // class chunk manager.
 364   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 365 };
 366 
 367 class SmallBlocks : public CHeapObj<mtClass> {
 368   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 369   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 370 
 371  private:
 372   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 373 
 374   FreeList<Metablock>& list_at(size_t word_size) {
 375     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 376     return _small_lists[word_size - _small_block_min_size];
 377   }
 378 
 379  public:
 380   SmallBlocks() {
 381     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 382       uint k = i - _small_block_min_size;
 383       _small_lists[k].set_size(i);
 384     }
 385   }
 386 
 387   size_t total_size() const {
 388     size_t result = 0;
 389     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 390       uint k = i - _small_block_min_size;
 391       result = result + _small_lists[k].count() * _small_lists[k].size();
 392     }
 393     return result;
 394   }
 395 
 396   static uint small_block_max_size() { return _small_block_max_size; }
 397   static uint small_block_min_size() { return _small_block_min_size; }
 398 
 399   MetaWord* get_block(size_t word_size) {
 400     if (list_at(word_size).count() > 0) {
 401       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 402       return new_block;
 403     } else {
 404       return NULL;
 405     }
 406   }
 407   void return_block(Metablock* free_chunk, size_t word_size) {
 408     list_at(word_size).return_chunk_at_head(free_chunk, false);
 409     assert(list_at(word_size).count() > 0, "Should have a chunk");
 410   }
 411 
 412   void print_on(outputStream* st) const {
 413     st->print_cr("SmallBlocks:");
 414     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 415       uint k = i - _small_block_min_size;
 416       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 417     }
 418   }
 419 };
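
// Illustrative SmallBlocks usage (a sketch; 'blk' stands for a hypothetical
// Metablock* of exactly small_block_min_size() words): return_block() pushes
// the block to the head of the list for its word size, and a subsequent
// get_block() for the same word size pops it again.
//
//   SmallBlocks sb;
//   sb.return_block(blk, SmallBlocks::small_block_min_size());
//   MetaWord* p = sb.get_block(SmallBlocks::small_block_min_size()); // == blk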
 420 
 421 // Used to manage the free list of Metablocks (a block corresponds
 422 // to the allocation of a quantum of metadata).
 423 class BlockFreelist : public CHeapObj<mtClass> {
 424   BlockTreeDictionary* const _dictionary;
 425   SmallBlocks* _small_blocks;
 426 
 427   // Only allocate and split from freelist if the size of the allocation
 428   // is at least 1/4th the size of the available block.
 429   const static int WasteMultiplier = 4;
 430 
 431   // Accessors
 432   BlockTreeDictionary* dictionary() const { return _dictionary; }
 433   SmallBlocks* small_blocks() {
 434     if (_small_blocks == NULL) {
 435       _small_blocks = new SmallBlocks();
 436     }
 437     return _small_blocks;
 438   }
 439 
 440  public:
 441   BlockFreelist();
 442   ~BlockFreelist();
 443 
  444   // Get a block from, or return a block to, the free list.
 445   MetaWord* get_block(size_t word_size);
 446   void return_block(MetaWord* p, size_t word_size);
 447 
 448   size_t total_size() const  {
 449     size_t result = dictionary()->total_size();
 450     if (_small_blocks != NULL) {
 451       result = result + _small_blocks->total_size();
 452     }
 453     return result;
 454   }
 455 
 456   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 457   void print_on(outputStream* st) const;
 458 };
 459 
  460 // Helper for the occupancy bitmap: a type trait that yields an all-bits-set unsigned constant.
 461 template <typename T> struct all_ones  { static const T value; };
 462 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 463 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
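
// For example, all_ones<uint32_t>::value is 0xFFFFFFFF; OccupancyMap uses it in
// set_bits_of_region_T() to set or clear a whole 32- or 64-bit word of map data
// in a single store.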
 464 
 465 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 466 // keeps information about
 467 // - where a chunk starts
 468 // - whether a chunk is in-use or free
  469 // Each bit in this bitmap represents one region of memory the size of the
  470 // smallest chunk (SpecializedChunk or ClassSpecializedChunk).
 471 class OccupancyMap : public CHeapObj<mtInternal> {
 472 
 473   // The address range this map covers.
 474   const MetaWord* const _reference_address;
 475   const size_t _word_size;
 476 
 477   // The word size of a specialized chunk, aka the number of words one
 478   // bit in this map represents.
 479   const size_t _smallest_chunk_word_size;
 480 
 481   // map data
 482   // Data are organized in two bit layers:
 483   // The first layer is the chunk-start-map. Here, a bit is set to mark
 484   // the corresponding region as the head of a chunk.
 485   // The second layer is the in-use-map. Here, a set bit indicates that
  486   // the corresponding region belongs to a chunk which is in use.
 487   uint8_t* _map[2];
 488 
 489   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 490 
 491   // length, in bytes, of bitmap data
 492   size_t _map_size;
 493 
 494   // Returns true if bit at position pos at bit-layer layer is set.
 495   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 496     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 497     const unsigned byteoffset = pos / 8;
 498     assert(byteoffset < _map_size,
 499            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 500     const unsigned mask = 1 << (pos % 8);
 501     return (_map[layer][byteoffset] & mask) > 0;
 502   }
 503 
 504   // Changes bit at position pos at bit-layer layer to value v.
 505   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 506     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 507     const unsigned byteoffset = pos / 8;
 508     assert(byteoffset < _map_size,
 509            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 510     const unsigned mask = 1 << (pos % 8);
 511     if (v) {
 512       _map[layer][byteoffset] |= mask;
 513     } else {
 514       _map[layer][byteoffset] &= ~mask;
 515     }
 516   }
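
  // Example of the addressing scheme used above: bit position 13 lives in map
  // byte 13 / 8 == 1 and is selected by mask 1 << (13 % 8) == 0x20 within that byte.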
 517 
 518   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 519   // pos is 32/64 aligned and num_bits is 32/64.
  520   // This is the typical case when coalescing to medium chunks, whose size is
  521   // 32 or 64 times the specialized chunk size (depending on class or non-class
  522   // case), so they occupy 32 or 64 bits, which should be 32/64-bit aligned,
  523   // because chunks are chunk-size aligned.
 524   template <typename T>
 525   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 526     assert(_map_size > 0, "not initialized");
 527     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 528     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 529     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 530     const size_t byteoffset = pos / 8;
 531     assert(byteoffset <= (_map_size - sizeof(T)),
 532            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 533     const T w = *(T*)(_map[layer] + byteoffset);
 534     return w > 0 ? true : false;
 535   }
 536 
 537   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 538   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 539     if (pos % 32 == 0 && num_bits == 32) {
 540       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 541     } else if (pos % 64 == 0 && num_bits == 64) {
 542       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 543     } else {
 544       for (unsigned n = 0; n < num_bits; n ++) {
 545         if (get_bit_at_position(pos + n, layer)) {
 546           return true;
 547         }
 548       }
 549     }
 550     return false;
 551   }
 552 
 553   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 554   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 555     assert(word_size % _smallest_chunk_word_size == 0,
 556         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 557     const unsigned pos = get_bitpos_for_address(p);
 558     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 559     return is_any_bit_set_in_region(pos, num_bits, layer);
 560   }
 561 
 562   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 563   // pos is 32/64 aligned and num_bits is 32/64.
  564   // This is the typical case when coalescing to medium chunks, whose size
  565   // is 32 or 64 times the specialized chunk size (depending on class or
  566   // non-class case), so they occupy 32 or 64 bits, which should be
  567   // 32/64-bit aligned, because chunks are chunk-size aligned.
 568   template <typename T>
 569   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 570     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 571            (unsigned)(sizeof(T) * 8), pos);
 572     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 573            num_bits, (unsigned)(sizeof(T) * 8));
 574     const size_t byteoffset = pos / 8;
 575     assert(byteoffset <= (_map_size - sizeof(T)),
 576            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 577     T* const pw = (T*)(_map[layer] + byteoffset);
 578     *pw = v ? all_ones<T>::value : (T) 0;
 579   }
 580 
 581   // Set all bits in a region starting at pos to a value.
 582   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 583     assert(_map_size > 0, "not initialized");
 584     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 585     if (pos % 32 == 0 && num_bits == 32) {
 586       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 587     } else if (pos % 64 == 0 && num_bits == 64) {
 588       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 589     } else {
 590       for (unsigned n = 0; n < num_bits; n ++) {
 591         set_bit_at_position(pos + n, layer, v);
 592       }
 593     }
 594   }
 595 
 596   // Helper: sets all bits in a region [p, p+word_size).
 597   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 598     assert(word_size % _smallest_chunk_word_size == 0,
 599         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 600     const unsigned pos = get_bitpos_for_address(p);
 601     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 602     set_bits_of_region(pos, num_bits, layer, v);
 603   }
 604 
 605   // Helper: given an address, return the bit position representing that address.
 606   unsigned get_bitpos_for_address(const MetaWord* p) const {
 607     assert(_reference_address != NULL, "not initialized");
 608     assert(p >= _reference_address && p < _reference_address + _word_size,
 609            "Address %p out of range for occupancy map [%p..%p).",
 610             p, _reference_address, _reference_address + _word_size);
 611     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 612            "Address not aligned (%p).", p);
 613     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 614     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 615     return (unsigned) d;
 616   }
 617 
 618  public:
 619 
 620   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 621     _reference_address(reference_address), _word_size(word_size),
 622     _smallest_chunk_word_size(smallest_chunk_word_size) {
 623     assert(reference_address != NULL, "invalid reference address");
 624     assert(is_aligned(reference_address, smallest_chunk_word_size),
 625            "Reference address not aligned to smallest chunk size.");
 626     assert(is_aligned(word_size, smallest_chunk_word_size),
 627            "Word_size shall be a multiple of the smallest chunk size.");
 628     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 629     size_t num_bits = word_size / smallest_chunk_word_size;
 630     _map_size = (num_bits + 7) / 8;
 631     assert(_map_size * 8 >= num_bits, "sanity");
 632     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 633     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 634     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 635     memset(_map[1], 0, _map_size);
 636     memset(_map[0], 0, _map_size);
  637     // Sanity test: the first and last possible chunk start addresses in
  638     // the covered range shall map to the first and last bits in the bitmap.
  639     assert(get_bitpos_for_address(reference_address) == 0,
  640       "First chunk address in range must map to first bit in bitmap.");
 641     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 642       "Last chunk address in range must map to last bit in bitmap.");
 643   }
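
  // Sizing example (illustrative): for a node covering 4 * M words with
  // _smallest_chunk_word_size == ClassSpecializedChunk == 128, the map holds
  // 4 * M / 128 == 32 * K bits, i.e. _map_size == 4 * K bytes per layer.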
 644 
 645   ~OccupancyMap() {
 646     os::free(_map[0]);
 647     os::free(_map[1]);
 648   }
 649 
  650   // Returns true if a chunk starts at address p.
 651   bool chunk_starts_at_address(MetaWord* p) const {
 652     const unsigned pos = get_bitpos_for_address(p);
 653     return get_bit_at_position(pos, layer_chunk_start_map);
 654   }
 655 
 656   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 657     const unsigned pos = get_bitpos_for_address(p);
 658     set_bit_at_position(pos, layer_chunk_start_map, v);
 659   }
 660 
 661   // Removes all chunk-start-bits inside a region, typically as a
 662   // result of a chunk merge.
 663   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 664     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 665   }
 666 
  667   // Returns true if there are live (in use) chunks in the region limited
 668   // by [p, p+word_size).
 669   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 670     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 671   }
 672 
 673   // Marks the region starting at p with the size word_size as in use
 674   // or free, depending on v.
 675   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 676     set_bits_of_region(p, word_size, layer_in_use_map, v);
 677   }
 678 
 679 #ifdef ASSERT
 680   // Verify occupancy map for the address range [from, to).
 681   // We need to tell it the address range, because the memory the
  682   // occupancy map is covering may not be fully committed yet.
 683   void verify(MetaWord* from, MetaWord* to) {
 684     Metachunk* chunk = NULL;
 685     int nth_bit_for_chunk = 0;
 686     MetaWord* chunk_end = NULL;
 687     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 688       const unsigned pos = get_bitpos_for_address(p);
 689       // Check the chunk-starts-info:
 690       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 691         // Chunk start marked in bitmap.
 692         chunk = (Metachunk*) p;
 693         if (chunk_end != NULL) {
 694           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 695                  "the next chunk to start at %p).", p, chunk_end);
 696         }
 697         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 698         if (chunk->get_chunk_type() != HumongousIndex) {
 699           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 700         }
 701         chunk_end = p + chunk->word_size();
 702         nth_bit_for_chunk = 0;
 703         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 704       } else {
 705         // No chunk start marked in bitmap.
 706         assert(chunk != NULL, "Chunk should start at start of address range.");
 707         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 708         nth_bit_for_chunk ++;
 709       }
 710       // Check the in-use-info:
 711       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 712       if (in_use_bit) {
 713         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 714                chunk, nth_bit_for_chunk);
 715       } else {
 716         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 717                chunk, nth_bit_for_chunk);
 718       }
 719     }
 720   }
 721 
 722   // Verify that a given chunk is correctly accounted for in the bitmap.
 723   void verify_for_chunk(Metachunk* chunk) {
 724     assert(chunk_starts_at_address((MetaWord*) chunk),
 725            "No chunk start marked in map for chunk %p.", chunk);
 726     // For chunks larger than the minimal chunk size, no other chunk
 727     // must start in its area.
 728     if (chunk->word_size() > _smallest_chunk_word_size) {
 729       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 730                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 731              "No chunk must start within another chunk.");
 732     }
 733     if (!chunk->is_tagged_free()) {
 734       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 735              "Chunk %p is in use but marked as free in map (%d %d).",
 736              chunk, chunk->get_chunk_type(), chunk->get_origin());
 737     } else {
 738       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 739              "Chunk %p is free but marked as in-use in map (%d %d).",
 740              chunk, chunk->get_chunk_type(), chunk->get_origin());
 741     }
 742   }
 743 
 744 #endif // ASSERT
 745 
 746 };
 747 
 748 // A VirtualSpaceList node.
 749 class VirtualSpaceNode : public CHeapObj<mtClass> {
 750   friend class VirtualSpaceList;
 751 
 752   // Link to next VirtualSpaceNode
 753   VirtualSpaceNode* _next;
 754 
  755   // Whether this node belongs to the class metaspace or the non-class metaspace.
 756   const bool _is_class;
 757 
 758   // total in the VirtualSpace
 759   MemRegion _reserved;
 760   ReservedSpace _rs;
 761   VirtualSpace _virtual_space;
 762   MetaWord* _top;
 763   // count of chunks contained in this VirtualSpace
 764   uintx _container_count;
 765 
 766   OccupancyMap* _occupancy_map;
 767 
 768   // Convenience functions to access the _virtual_space
 769   char* low()  const { return virtual_space()->low(); }
 770   char* high() const { return virtual_space()->high(); }
 771 
 772   // The first Metachunk will be allocated at the bottom of the
 773   // VirtualSpace
 774   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 775 
 776   // Committed but unused space in the virtual space
 777   size_t free_words_in_vs() const;
 778 
 779   // True if this node belongs to class metaspace.
 780   bool is_class() const { return _is_class; }
 781 
 782   // Helper function for take_from_committed: allocate padding chunks
 783   // until top is at the given address.
 784   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 785 
 786  public:
 787 
 788   VirtualSpaceNode(bool is_class, size_t byte_size);
 789   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 790     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 791   ~VirtualSpaceNode();
 792 
 793   // Convenience functions for logical bottom and end
 794   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 795   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 796 
 797   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 798   OccupancyMap* occupancy_map() { return _occupancy_map; }
 799 
 800   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 801 
 802   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 803   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 804 
 805   bool is_pre_committed() const { return _virtual_space.special(); }
 806 
 807   // address of next available space in _virtual_space;
 808   // Accessors
 809   VirtualSpaceNode* next() { return _next; }
 810   void set_next(VirtualSpaceNode* v) { _next = v; }
 811 
 812   void set_reserved(MemRegion const v) { _reserved = v; }
 813   void set_top(MetaWord* v) { _top = v; }
 814 
 815   // Accessors
 816   MemRegion* reserved() { return &_reserved; }
 817   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 818 
 819   // Returns true if "word_size" is available in the VirtualSpace
 820   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 821 
 822   MetaWord* top() const { return _top; }
 823   void inc_top(size_t word_size) { _top += word_size; }
 824 
 825   uintx container_count() { return _container_count; }
 826   void inc_container_count();
 827   void dec_container_count();
 828 #ifdef ASSERT
 829   uintx container_count_slow();
 830   void verify_container_count();
 831 #endif
 832 
 833   // used and capacity in this single entry in the list
 834   size_t used_words_in_vs() const;
 835   size_t capacity_words_in_vs() const;
 836 
 837   bool initialize();
 838 
 839   // get space from the virtual space
 840   Metachunk* take_from_committed(size_t chunk_word_size);
 841 
 842   // Allocate a chunk from the virtual space and return it.
 843   Metachunk* get_chunk_vs(size_t chunk_word_size);
 844 
 845   // Expands/shrinks the committed space in a virtual space.  Delegates
  846   // to VirtualSpace.
 847   bool expand_by(size_t min_words, size_t preferred_words);
 848 
 849   // In preparation for deleting this node, remove all the chunks
 850   // in the node from any freelist.
 851   void purge(ChunkManager* chunk_manager);
 852 
 853   // If an allocation doesn't fit in the current node a new node is created.
 854   // Allocate chunks out of the remaining committed space in this node
 855   // to avoid wasting that memory.
 856   // This always adds up because all the chunk sizes are multiples of
 857   // the smallest chunk size.
 858   void retire(ChunkManager* chunk_manager);
 859 
 860 
 861   void print_on(outputStream* st) const;
 862   void print_map(outputStream* st, bool is_class) const;
 863 
 864   // Debug support
 865   DEBUG_ONLY(void mangle();)
 866   // Verify counters, all chunks in this list node and the occupancy map.
 867   DEBUG_ONLY(void verify();)
 868   // Verify that all free chunks in this node are ideally merged
  869   // (there should not be multiple small chunks where a large chunk could exist).
 870   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 871 
 872 };
 873 
 874 #define assert_is_aligned(value, alignment)                  \
 875   assert(is_aligned((value), (alignment)),                   \
 876          SIZE_FORMAT_HEX " is not aligned to "               \
 877          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 878 
 879 // Decide if large pages should be committed when the memory is reserved.
 880 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 881   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 882     size_t words = bytes / BytesPerWord;
 883     bool is_class = false; // We never reserve large pages for the class space.
 884     if (MetaspaceGC::can_expand(words, is_class) &&
 885         MetaspaceGC::allowed_expansion() >= words) {
 886       return true;
 887     }
 888   }
 889 
 890   return false;
 891 }
 892 
  893 // byte_size is the size of the associated virtual space.
 894 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 895   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 896   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 897   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 898   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 899 
 900   if (_rs.is_reserved()) {
 901     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 902     assert(_rs.size() != 0, "Catch if we get a 0 size");
 903     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 904     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 905 
 906     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 907   }
 908 }
 909 
 910 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 911   DEBUG_ONLY(this->verify();)
 912   Metachunk* chunk = first_chunk();
 913   Metachunk* invalid_chunk = (Metachunk*) top();
 914   while (chunk < invalid_chunk ) {
 915     assert(chunk->is_tagged_free(), "Should be tagged free");
 916     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 917     chunk_manager->remove_chunk(chunk);
 918     chunk->remove_sentinel();
 919     assert(chunk->next() == NULL &&
 920            chunk->prev() == NULL,
 921            "Was not removed from its list");
 922     chunk = (Metachunk*) next;
 923   }
 924 }
 925 
 926 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 927 
 928   if (bottom() == top()) {
 929     return;
 930   }
 931 
 932   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 933   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 934   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 935 
 936   int line_len = 100;
 937   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 938   line_len = (int)(section_len / spec_chunk_size);
 939 
 940   static const int NUM_LINES = 4;
 941 
 942   char* lines[NUM_LINES];
 943   for (int i = 0; i < NUM_LINES; i ++) {
 944     lines[i] = (char*)os::malloc(line_len, mtInternal);
 945   }
 946   int pos = 0;
 947   const MetaWord* p = bottom();
 948   const Metachunk* chunk = (const Metachunk*)p;
 949   const MetaWord* chunk_end = p + chunk->word_size();
 950   while (p < top()) {
 951     if (pos == line_len) {
 952       pos = 0;
 953       for (int i = 0; i < NUM_LINES; i ++) {
 954         st->fill_to(22);
 955         st->print_raw(lines[i], line_len);
 956         st->cr();
 957       }
 958     }
 959     if (pos == 0) {
 960       st->print(PTR_FORMAT ":", p2i(p));
 961     }
 962     if (p == chunk_end) {
 963       chunk = (Metachunk*)p;
 964       chunk_end = p + chunk->word_size();
 965     }
 966     // line 1: chunk starting points (a dot if that area is a chunk start).
 967     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 968 
 969     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 970     // chunk is in use.
 971     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 972     if (chunk->word_size() == spec_chunk_size) {
 973       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 974     } else if (chunk->word_size() == small_chunk_size) {
 975       lines[1][pos] = chunk_is_free ? 's' : 'S';
 976     } else if (chunk->word_size() == med_chunk_size) {
 977       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 978     } else if (chunk->word_size() > med_chunk_size) {
 979       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 980     } else {
 981       ShouldNotReachHere();
 982     }
 983 
 984     // Line 3: chunk origin
 985     const ChunkOrigin origin = chunk->get_origin();
 986     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 987 
 988     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 989     //         but were never used.
 990     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 991 
 992     p += spec_chunk_size;
 993     pos ++;
 994   }
 995   if (pos > 0) {
 996     for (int i = 0; i < NUM_LINES; i ++) {
 997       st->fill_to(22);
 998       st->print_raw(lines[i], line_len);
 999       st->cr();
1000     }
1001   }
1002   for (int i = 0; i < NUM_LINES; i ++) {
1003     os::free(lines[i]);
1004   }
1005 }
1006 
1007 
1008 #ifdef ASSERT
1009 uintx VirtualSpaceNode::container_count_slow() {
1010   uintx count = 0;
1011   Metachunk* chunk = first_chunk();
1012   Metachunk* invalid_chunk = (Metachunk*) top();
1013   while (chunk < invalid_chunk ) {
1014     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1015     do_verify_chunk(chunk);
1016     // Don't count the chunks on the free lists.  Those are
1017     // still part of the VirtualSpaceNode but not currently
1018     // counted.
1019     if (!chunk->is_tagged_free()) {
1020       count++;
1021     }
1022     chunk = (Metachunk*) next;
1023   }
1024   return count;
1025 }
1026 #endif
1027 
1028 #ifdef ASSERT
1029 // Verify counters, all chunks in this list node and the occupancy map.
1030 void VirtualSpaceNode::verify() {
1031   uintx num_in_use_chunks = 0;
1032   Metachunk* chunk = first_chunk();
1033   Metachunk* invalid_chunk = (Metachunk*) top();
1034 
1035   // Iterate the chunks in this node and verify each chunk.
1036   while (chunk < invalid_chunk ) {
1037     DEBUG_ONLY(do_verify_chunk(chunk);)
1038     if (!chunk->is_tagged_free()) {
1039       num_in_use_chunks ++;
1040     }
1041     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1042     chunk = (Metachunk*) next;
1043   }
1044   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
 1045          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1046   // Also verify the occupancy map.
1047   occupancy_map()->verify(this->bottom(), this->top());
1048 }
1049 #endif // ASSERT
1050 
1051 #ifdef ASSERT
1052 // Verify that all free chunks in this node are ideally merged
 1053 // (there should not be multiple small chunks where a large chunk could exist).
1054 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1055   Metachunk* chunk = first_chunk();
1056   Metachunk* invalid_chunk = (Metachunk*) top();
1057   // Shorthands.
1058   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1059   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1060   int num_free_chunks_since_last_med_boundary = -1;
1061   int num_free_chunks_since_last_small_boundary = -1;
1062   while (chunk < invalid_chunk ) {
1063     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1064     // Reset the counter when encountering a non-free chunk.
1065     if (chunk->get_chunk_type() != HumongousIndex) {
1066       if (chunk->is_tagged_free()) {
1067         // Count successive free, non-humongous chunks.
1068         if (is_aligned(chunk, size_small)) {
1069           assert(num_free_chunks_since_last_small_boundary <= 1,
1070                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1071           num_free_chunks_since_last_small_boundary = 0;
1072         } else if (num_free_chunks_since_last_small_boundary != -1) {
1073           num_free_chunks_since_last_small_boundary ++;
1074         }
1075         if (is_aligned(chunk, size_med)) {
1076           assert(num_free_chunks_since_last_med_boundary <= 1,
1077                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1078           num_free_chunks_since_last_med_boundary = 0;
1079         } else if (num_free_chunks_since_last_med_boundary != -1) {
1080           num_free_chunks_since_last_med_boundary ++;
1081         }
1082       } else {
1083         // Encountering a non-free chunk, reset counters.
1084         num_free_chunks_since_last_med_boundary = -1;
1085         num_free_chunks_since_last_small_boundary = -1;
1086       }
1087     } else {
1088       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1089       num_free_chunks_since_last_med_boundary = -1;
1090       num_free_chunks_since_last_small_boundary = -1;
1091     }
1092 
1093     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1094     chunk = (Metachunk*) next;
1095   }
1096 }
1097 #endif // ASSERT
1098 
1099 // List of VirtualSpaces for metadata allocation.
1100 class VirtualSpaceList : public CHeapObj<mtClass> {
1101   friend class VirtualSpaceNode;
1102 
1103   enum VirtualSpaceSizes {
1104     VirtualSpaceSize = 256 * K
1105   };
1106 
1107   // Head of the list
1108   VirtualSpaceNode* _virtual_space_list;
1109   // virtual space currently being used for allocations
1110   VirtualSpaceNode* _current_virtual_space;
1111 
1112   // Is this VirtualSpaceList used for the compressed class space
1113   bool _is_class;
1114 
1115   // Sum of reserved and committed memory in the virtual spaces
1116   size_t _reserved_words;
1117   size_t _committed_words;
1118 
1119   // Number of virtual spaces
1120   size_t _virtual_space_count;
1121 
1122   ~VirtualSpaceList();
1123 
1124   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1125 
1126   void set_virtual_space_list(VirtualSpaceNode* v) {
1127     _virtual_space_list = v;
1128   }
1129   void set_current_virtual_space(VirtualSpaceNode* v) {
1130     _current_virtual_space = v;
1131   }
1132 
1133   void link_vs(VirtualSpaceNode* new_entry);
1134 
1135   // Get another virtual space and add it to the list.  This
1136   // is typically prompted by a failed attempt to allocate a chunk
1137   // and is typically followed by the allocation of a chunk.
1138   bool create_new_virtual_space(size_t vs_word_size);
1139 
1140   // Chunk up the unused committed space in the current
1141   // virtual space and add the chunks to the free list.
1142   void retire_current_virtual_space();
1143 
1144  public:
1145   VirtualSpaceList(size_t word_size);
1146   VirtualSpaceList(ReservedSpace rs);
1147 
1148   size_t free_bytes();
1149 
1150   Metachunk* get_new_chunk(size_t chunk_word_size,
1151                            size_t suggested_commit_granularity);
1152 
1153   bool expand_node_by(VirtualSpaceNode* node,
1154                       size_t min_words,
1155                       size_t preferred_words);
1156 
1157   bool expand_by(size_t min_words,
1158                  size_t preferred_words);
1159 
1160   VirtualSpaceNode* current_virtual_space() {
1161     return _current_virtual_space;
1162   }
1163 
1164   bool is_class() const { return _is_class; }
1165 
1166   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1167 
1168   size_t reserved_words()  { return _reserved_words; }
1169   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1170   size_t committed_words() { return _committed_words; }
1171   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1172 
1173   void inc_reserved_words(size_t v);
1174   void dec_reserved_words(size_t v);
1175   void inc_committed_words(size_t v);
1176   void dec_committed_words(size_t v);
1177   void inc_virtual_space_count();
1178   void dec_virtual_space_count();
1179 
1180   bool contains(const void* ptr);
1181 
 1182   // Unlink empty VirtualSpaceNodes and free them.
1183   void purge(ChunkManager* chunk_manager);
1184 
1185   void print_on(outputStream* st) const;
1186   void print_map(outputStream* st) const;
1187 
1188   class VirtualSpaceListIterator : public StackObj {
1189     VirtualSpaceNode* _virtual_spaces;
1190    public:
1191     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1192       _virtual_spaces(virtual_spaces) {}
1193 
1194     bool repeat() {
1195       return _virtual_spaces != NULL;
1196     }
1197 
1198     VirtualSpaceNode* get_next() {
1199       VirtualSpaceNode* result = _virtual_spaces;
1200       if (_virtual_spaces != NULL) {
1201         _virtual_spaces = _virtual_spaces->next();
1202       }
1203       return result;
1204     }
1205   };
1206 };
1207 
1208 class Metadebug : AllStatic {
1209   // Debugging support for Metaspaces
1210   static int _allocation_fail_alot_count;
1211 
1212  public:
1213 
1214   static void init_allocation_fail_alot_count();
1215 #ifdef ASSERT
1216   static bool test_metadata_failure();
1217 #endif
1218 };
1219 
1220 int Metadebug::_allocation_fail_alot_count = 0;
1221 
1222 //  SpaceManager - used by Metaspace to handle allocations
1223 class SpaceManager : public CHeapObj<mtClass> {
1224   friend class ClassLoaderMetaspace;
1225   friend class Metadebug;
1226 
1227  private:
1228 
1229   // protects allocations
1230   Mutex* const _lock;
1231 
1232   // Type of metadata allocated.
1233   const Metaspace::MetadataType   _mdtype;
1234 
1235   // Type of metaspace
1236   const Metaspace::MetaspaceType  _space_type;
1237 
1238   // List of chunks in use by this SpaceManager.  Allocations
1239   // are done from the current chunk.  The list is used for deallocating
1240   // chunks when the SpaceManager is freed.
1241   Metachunk* _chunks_in_use[NumberOfInUseLists];
1242   Metachunk* _current_chunk;
1243 
1244   // Maximum number of small chunks to allocate to a SpaceManager
1245   static uint const _small_chunk_limit;
1246 
 1247   // Maximum number of specialized chunks to allocate for anonymous and delegating
1248   // metadata space to a SpaceManager
1249   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1250 
1251   // Sum of all space in allocated chunks
1252   size_t _allocated_blocks_words;
1253 
1254   // Sum of all allocated chunks
1255   size_t _allocated_chunks_words;
1256   size_t _allocated_chunks_count;
1257 
1258   // Free lists of blocks are per SpaceManager since they
1259   // are assumed to be in chunks in use by the SpaceManager
1260   // and all chunks in use by a SpaceManager are freed when
1261   // the class loader using the SpaceManager is collected.
1262   BlockFreelist* _block_freelists;
1263 
 1264   // Protects virtual space and chunk expansions.
1265   static const char*  _expand_lock_name;
1266   static const int    _expand_lock_rank;
1267   static Mutex* const _expand_lock;
1268 
1269  private:
1270   // Accessors
1271   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1272   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1273     _chunks_in_use[index] = v;
1274   }
1275 
1276   BlockFreelist* block_freelists() const { return _block_freelists; }
1277 
1278   Metaspace::MetadataType mdtype() { return _mdtype; }
1279 
1280   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1281   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1282 
1283   Metachunk* current_chunk() const { return _current_chunk; }
1284   void set_current_chunk(Metachunk* v) {
1285     _current_chunk = v;
1286   }
1287 
1288   Metachunk* find_current_chunk(size_t word_size);
1289 
1290   // Add chunk to the list of chunks in use
1291   void add_chunk(Metachunk* v, bool make_current);
1292   void retire_current_chunk();
1293 
1294   Mutex* lock() const { return _lock; }
1295 
1296  protected:
1297   void initialize();
1298 
1299  public:
1300   SpaceManager(Metaspace::MetadataType mdtype,
1301                Metaspace::MetaspaceType space_type,
1302                Mutex* lock);
1303   ~SpaceManager();
1304 
1305   enum ChunkMultiples {
1306     MediumChunkMultiple = 4
1307   };
1308 
1309   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1310   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1311   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1312 
1313   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1314 
1315   // Accessors
1316   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1317 
1318   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1319   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1320   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1321 
1322   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1323 
1324   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
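
  // For example, a non-class SpaceManager has medium_chunk_size() == MediumChunk
  // == 8 * K words, so medium_chunk_bunch() == 8 * K * MediumChunkMultiple
  // == 32 * K words.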
1325 
1326   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1327   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1328   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1329   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1330   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1331 
1332   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1333 
1334   static Mutex* expand_lock() { return _expand_lock; }
1335 
1336   // Increment the per Metaspace and global running sums for Metachunks
 1337   // by the given size.  This is used when a Metachunk is added to
1338   // the in-use list.
1339   void inc_size_metrics(size_t words);
 1340   // Increment the per Metaspace and global running sums for Metablocks by the given
1341   // size.  This is used when a Metablock is allocated.
1342   void inc_used_metrics(size_t words);
1343   // Delete the portion of the running sums for this SpaceManager. That is,
 1344   // the global running sums for the Metachunks and Metablocks are
1345   // decremented for all the Metachunks in-use by this SpaceManager.
1346   void dec_total_from_size_metrics();
1347 
1348   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1349   // or return the unadjusted size if the requested size is humongous.
1350   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1351   size_t adjust_initial_chunk_size(size_t requested) const;
1352 
1353   // Get the initial chunk size for this metaspace type.
1354   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1355 
1356   size_t sum_capacity_in_chunks_in_use() const;
1357   size_t sum_used_in_chunks_in_use() const;
1358   size_t sum_free_in_chunks_in_use() const;
1359   size_t sum_waste_in_chunks_in_use() const;
1360   size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1361 
1362   size_t sum_count_in_chunks_in_use();
1363   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1364 
1365   Metachunk* get_new_chunk(size_t chunk_word_size);
1366 
1367   // Block allocation and deallocation.
1368   // Allocates a block from the current chunk
1369   MetaWord* allocate(size_t word_size);
1370 
1371   // Helper for allocations
1372   MetaWord* allocate_work(size_t word_size);
1373 
1374   // Returns a block to the per manager freelist
1375   void deallocate(MetaWord* p, size_t word_size);
1376 
1377   // Based on the allocation size and a minimum chunk size, returns the chunk size
1378   // to use when expanding the space for a chunk allocation.
1379   size_t calc_chunk_size(size_t allocation_word_size);
1380 
1381   // Called when an allocation from the current chunk fails.
1382   // Gets a new chunk (may require getting a new virtual space),
1383   // and allocates from that chunk.
1384   MetaWord* grow_and_allocate(size_t word_size);
1385 
1386   // Notify memory usage to MemoryService.
1387   void track_metaspace_memory_usage();
1388 
1389   // debugging support.
1390 
1391   void dump(outputStream* const out) const;
1392   void print_on(outputStream* st) const;
1393   void locked_print_chunks_in_use_on(outputStream* st) const;
1394 
1395   void verify();
1396   void verify_chunk_size(Metachunk* chunk);
1397 #ifdef ASSERT
1398   void verify_allocated_blocks_words();
1399 #endif
1400 
1401   // This adjusts the given size to be at least the minimum allocation size in
1402   // words for data in metaspace.  Essentially the minimum size is currently 3 words.
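       // For example, a one-word request is first raised to sizeof(Metablock) bytes and
       // then aligned up to Metachunk::object_alignment(); that is where the current
       // three-word minimum comes from.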
1403   size_t get_allocation_word_size(size_t word_size) {
1404     size_t byte_size = word_size * BytesPerWord;
1405 
1406     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1407     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1408 
1409     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1410     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1411 
1412     return raw_word_size;
1413   }
1414 };
1415 
1416 uint const SpaceManager::_small_chunk_limit = 4;
1417 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1418 
1419 const char* SpaceManager::_expand_lock_name =
1420   "SpaceManager chunk allocation lock";
1421 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
1422 Mutex* const SpaceManager::_expand_lock =
1423   new Mutex(SpaceManager::_expand_lock_rank,
1424             SpaceManager::_expand_lock_name,
1425             Mutex::_allow_vm_block_flag,
1426             Monitor::_safepoint_check_never);
1427 
1428 void VirtualSpaceNode::inc_container_count() {
1429   assert_lock_strong(SpaceManager::expand_lock());
1430   _container_count++;
1431 }
1432 
1433 void VirtualSpaceNode::dec_container_count() {
1434   assert_lock_strong(SpaceManager::expand_lock());
1435   _container_count--;
1436 }
1437 
1438 #ifdef ASSERT
1439 void VirtualSpaceNode::verify_container_count() {
1440   assert(_container_count == container_count_slow(),
1441          "Inconsistency in container_count _container_count " UINTX_FORMAT
1442          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1443 }
1444 #endif
1445 
1446 // BlockFreelist methods
1447 
1448 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1449 
1450 BlockFreelist::~BlockFreelist() {
1451   delete _dictionary;
1452   if (_small_blocks != NULL) {
1453     delete _small_blocks;
1454   }
1455 }
1456 
1457 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1458   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1459 
1460   Metablock* free_chunk = ::new (p) Metablock(word_size);
1461   if (word_size < SmallBlocks::small_block_max_size()) {
1462     small_blocks()->return_block(free_chunk, word_size);
1463   } else {
1464     dictionary()->return_chunk(free_chunk);
1465   }
1466   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1467             SIZE_FORMAT, p2i(free_chunk), word_size);
1468 }
1469 
1470 MetaWord* BlockFreelist::get_block(size_t word_size) {
1471   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1472 
1473   // Try small_blocks first.
1474   if (word_size < SmallBlocks::small_block_max_size()) {
1475     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1476     // this space manager.
1477     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1478     if (new_block != NULL) {
1479       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1480               p2i(new_block), word_size);
1481       return new_block;
1482     }
1483   }
1484 
1485   if (word_size < BlockFreelist::min_dictionary_size()) {
1486     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1487     return NULL;
1488   }
1489 
1490   Metablock* free_block = dictionary()->get_chunk(word_size);
1491   if (free_block == NULL) {
1492     return NULL;
1493   }
1494 
1495   const size_t block_size = free_block->size();
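       // Reject blocks that are far larger than requested: handing such a block out would
       // waste most of it. Put it back into the freelist and report failure instead.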
1496   if (block_size > WasteMultiplier * word_size) {
1497     return_block((MetaWord*)free_block, block_size);
1498     return NULL;
1499   }
1500 
1501   MetaWord* new_block = (MetaWord*)free_block;
1502   assert(block_size >= word_size, "Incorrect size of block from freelist");
1503   const size_t unused = block_size - word_size;
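       // Return the unused tail of the block to the freelist, but only if it is large
       // enough to be tracked; smaller remainders are simply left as untracked waste.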
1504   if (unused >= SmallBlocks::small_block_min_size()) {
1505     return_block(new_block + word_size, unused);
1506   }
1507 
1508   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1509             p2i(new_block), word_size);
1510   return new_block;
1511 }
1512 
1513 void BlockFreelist::print_on(outputStream* st) const {
1514   dictionary()->print_free_lists(st);
1515   if (_small_blocks != NULL) {
1516     _small_blocks->print_on(st);
1517   }
1518 }
1519 
1520 // VirtualSpaceNode methods
1521 
1522 VirtualSpaceNode::~VirtualSpaceNode() {
1523   _rs.release();
1524   if (_occupancy_map != NULL) {
1525     delete _occupancy_map;
1526   }
1527 #ifdef ASSERT
1528   size_t word_size = sizeof(*this) / BytesPerWord;
1529   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1530 #endif
1531 }
1532 
1533 size_t VirtualSpaceNode::used_words_in_vs() const {
1534   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1535 }
1536 
1537 // Space committed in the VirtualSpace
1538 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1539   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1540 }
1541 
1542 size_t VirtualSpaceNode::free_words_in_vs() const {
1543   return pointer_delta(end(), top(), sizeof(MetaWord));
1544 }
1545 
1546 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1547 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1548 
1549   assert(target_top > top(), "Sanity");
1550 
1551   // Padding chunks are added to the freelist.
1552   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1553 
1554   // shorthands
1555   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1556   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1557   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1558 
1559   while (top() < target_top) {
1560 
1561     // We could make this coding more generic, but right now we only deal with two possible chunk sizes
1562     // for padding chunks, so it is not worth it.
1563     size_t padding_chunk_word_size = small_word_size;
1564     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1565       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1566       padding_chunk_word_size = spec_word_size;
1567     }
1568     MetaWord* here = top();
1569     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1570     inc_top(padding_chunk_word_size);
1571 
1572     // Create new padding chunk.
1573     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1574     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1575 
1576     Metachunk* const padding_chunk =
1577       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1578     assert(padding_chunk == (Metachunk*)here, "Sanity");
1579     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1580     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1581                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1582                                        (is_class() ? "class space " : "metaspace"),
1583                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1584 
1585     // Mark chunk start in occupancy map.
1586     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1587 
1588     // Chunks are born as in-use (see Metachunk ctor). So, before returning
1589     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1590     // will assert that).
1591     do_update_in_use_info_for_chunk(padding_chunk, true);
1592 
1593     // Return Chunk to freelist.
1594     inc_container_count();
1595     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1596     // Please note: at this point, ChunkManager::return_single_chunk()
1597     // may already have merged the padding chunk with neighboring chunks, so
1598     // it may have vanished at this point. Do not reference the padding
1599     // chunk beyond this point.
1600   }
1601 
1602   assert(top() == target_top, "Sanity");
1603 
1604 } // allocate_padding_chunks_until_top_is_at()
1605 
1606 // Allocates the chunk from the virtual space only.
1607 // This interface is also used internally for debugging.  Not all
1608 // chunks removed here are necessarily used for allocation.
1609 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1610   // Non-humongous chunks are to be allocated aligned to their chunk
1611   // size. So, start addresses of medium chunks are aligned to medium
1612   // chunk size, those of small chunks to small chunk size and so
1613   // forth. This facilitates merging of free chunks and reduces
1614   // fragmentation. Chunk sizes are spec < small < medium, with each
1615   // larger chunk size being a multiple of the next smaller chunk
1616   // size.
1617   // Because of this alignment, we may need to create a number of padding
1618   // chunks. These chunks are created and added to the freelist.
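       // For example, if a medium chunk is requested while top() is only aligned to the
       // small chunk size, small and/or specialized padding chunks are carved out first
       // until top() reaches the next medium-chunk-aligned address.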
1619 
1620   // The chunk manager to which we will give our padding chunks.
1621   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1622 
1623   // shorthands
1624   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1625   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1626   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1627 
1628   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1629          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1630 
1631   // Chunk alignment (in bytes) == chunk size unless humongous.
1632   // Humongous chunks are aligned to the smallest chunk size (spec).
1633   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1634                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1635 
1636   // Do we have enough space to create the requested chunk plus
1637   // any padding chunks needed?
1638   MetaWord* const next_aligned =
1639     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1640   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1641     return NULL;
1642   }
1643 
1644   // Before allocating the requested chunk, allocate padding chunks if necessary.
1645   // We only need to do this for small or medium chunks: specialized chunks are the
1646   // smallest size, hence always aligned. Humongous chunks are allocated without
1647   // such alignment (implicitly, they are still aligned to the smallest chunk size).
1648   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1649     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1650         (is_class() ? "class space " : "metaspace"),
1651         top(), next_aligned);
1652     allocate_padding_chunks_until_top_is_at(next_aligned);
1653     // Now, top should be aligned correctly.
1654     assert_is_aligned(top(), required_chunk_alignment);
1655   }
1656 
1657   // Now, top should be aligned correctly.
1658   assert_is_aligned(top(), required_chunk_alignment);
1659 
1660   // Bottom of the new chunk
1661   MetaWord* chunk_limit = top();
1662   assert(chunk_limit != NULL, "Not safe to call this method");
1663 
1664   // The virtual spaces are always expanded by the
1665   // commit granularity to enforce the following condition.
1666   // Without this the is_available check will not work correctly.
1667   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1668       "The committed memory doesn't match the expanded memory.");
1669 
1670   if (!is_available(chunk_word_size)) {
1671     LogTarget(Debug, gc, metaspace, freelist) lt;
1672     if (lt.is_enabled()) {
1673       LogStream ls(lt);
1674       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1675       // Dump some information about the virtual space that is nearly full
1676       print_on(&ls);
1677     }
1678     return NULL;
1679   }
1680 
1681   // Take the space (bump top on the current virtual space).
1682   inc_top(chunk_word_size);
1683 
1684   // Initialize the chunk
1685   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1686   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1687   assert(result == (Metachunk*)chunk_limit, "Sanity");
1688   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1689   do_update_in_use_info_for_chunk(result, true);
1690 
1691   inc_container_count();
1692 
1693   if (VerifyMetaspace) {
1694     DEBUG_ONLY(chunk_manager->locked_verify());
1695     DEBUG_ONLY(this->verify());
1696   }
1697 
1698   DEBUG_ONLY(do_verify_chunk(result));
1699 
1700   result->inc_use_count();
1701 
1702   return result;
1703 }
1704 
1705 
1706 // Expand the virtual space (commit more of the reserved space)
1707 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1708   size_t min_bytes = min_words * BytesPerWord;
1709   size_t preferred_bytes = preferred_words * BytesPerWord;
1710 
1711   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1712 
1713   if (uncommitted < min_bytes) {
1714     return false;
1715   }
1716 
1717   size_t commit = MIN2(preferred_bytes, uncommitted);
1718   bool result = virtual_space()->expand_by(commit, false);
1719 
1720   if (result) {
1721     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " bytes.",
1722               (is_class() ? "class" : "non-class"), commit);
1723   } else {
1724     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " bytes.",
1725               (is_class() ? "class" : "non-class"), commit);
1726   }
1727 
1728   assert(result, "Failed to commit memory");
1729 
1730   return result;
1731 }
1732 
1733 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1734   assert_lock_strong(SpaceManager::expand_lock());
1735   Metachunk* result = take_from_committed(chunk_word_size);
1736   return result;
1737 }
1738 
1739 bool VirtualSpaceNode::initialize() {
1740 
1741   if (!_rs.is_reserved()) {
1742     return false;
1743   }
1744 
1745   // These are necessary restrictions to make sure that the virtual space always
1746   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1747   // aligned, only the middle alignment of the VirtualSpace is used.
1748   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1749   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1750 
1751   // ReservedSpaces marked as special will have the entire memory
1752   // pre-committed. Setting a committed size will make sure that
1753   // committed_size and actual_committed_size agree.
1754   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1755 
1756   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1757                                             Metaspace::commit_alignment());
1758   if (result) {
1759     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1760         "Checking that the pre-committed memory was registered by the VirtualSpace");
1761 
1762     set_top((MetaWord*)virtual_space()->low());
1763     set_reserved(MemRegion((HeapWord*)_rs.base(),
1764                  (HeapWord*)(_rs.base() + _rs.size())));
1765 
1766     assert(reserved()->start() == (HeapWord*) _rs.base(),
1767            "Reserved start was not set properly " PTR_FORMAT
1768            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1769     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1770            "Reserved size was not set properly " SIZE_FORMAT
1771            " != " SIZE_FORMAT, reserved()->word_size(),
1772            _rs.size() / BytesPerWord);
1773   }
1774 
1775   // Initialize Occupancy Map.
1776   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1777   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1778 
1779   return result;
1780 }
1781 
1782 void VirtualSpaceNode::print_on(outputStream* st) const {
1783   size_t used = used_words_in_vs();
1784   size_t capacity = capacity_words_in_vs();
1785   VirtualSpace* vs = virtual_space();
1786   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1787            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1788            PTR_FORMAT ", " PTR_FORMAT ")",
1789            p2i(vs), capacity / K,
1790            capacity == 0 ? 0 : used * 100 / capacity,
1791            p2i(bottom()), p2i(top()), p2i(end()),
1792            p2i(vs->high_boundary()));
1793 }
1794 
1795 #ifdef ASSERT
1796 void VirtualSpaceNode::mangle() {
1797   size_t word_size = capacity_words_in_vs();
1798   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1799 }
1800 #endif // ASSERT
1801 
1802 // VirtualSpaceList methods
1803 // Space allocated from the VirtualSpace
1804 
1805 VirtualSpaceList::~VirtualSpaceList() {
1806   VirtualSpaceListIterator iter(virtual_space_list());
1807   while (iter.repeat()) {
1808     VirtualSpaceNode* vsl = iter.get_next();
1809     delete vsl;
1810   }
1811 }
1812 
1813 void VirtualSpaceList::inc_reserved_words(size_t v) {
1814   assert_lock_strong(SpaceManager::expand_lock());
1815   _reserved_words = _reserved_words + v;
1816 }
1817 void VirtualSpaceList::dec_reserved_words(size_t v) {
1818   assert_lock_strong(SpaceManager::expand_lock());
1819   _reserved_words = _reserved_words - v;
1820 }
1821 
1822 #define assert_committed_below_limit()                        \
1823   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1824          "Too much committed memory. Committed: " SIZE_FORMAT \
1825          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1826           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1827 
1828 void VirtualSpaceList::inc_committed_words(size_t v) {
1829   assert_lock_strong(SpaceManager::expand_lock());
1830   _committed_words = _committed_words + v;
1831 
1832   assert_committed_below_limit();
1833 }
1834 void VirtualSpaceList::dec_committed_words(size_t v) {
1835   assert_lock_strong(SpaceManager::expand_lock());
1836   _committed_words = _committed_words - v;
1837 
1838   assert_committed_below_limit();
1839 }
1840 
1841 void VirtualSpaceList::inc_virtual_space_count() {
1842   assert_lock_strong(SpaceManager::expand_lock());
1843   _virtual_space_count++;
1844 }
1845 void VirtualSpaceList::dec_virtual_space_count() {
1846   assert_lock_strong(SpaceManager::expand_lock());
1847   _virtual_space_count--;
1848 }
1849 
1850 void ChunkManager::remove_chunk(Metachunk* chunk) {
1851   size_t word_size = chunk->word_size();
1852   ChunkIndex index = list_index(word_size);
1853   if (index != HumongousIndex) {
1854     free_chunks(index)->remove_chunk(chunk);
1855   } else {
1856     humongous_dictionary()->remove_chunk(chunk);
1857   }
1858 
1859   // Chunk has been removed from the chunks free list, update counters.
1860   account_for_removed_chunk(chunk);
1861 }
1862 
1863 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1864   assert_lock_strong(SpaceManager::expand_lock());
1865   assert(chunk != NULL, "invalid chunk pointer");
1866   // Check for valid merge combinations.
1867   assert((chunk->get_chunk_type() == SpecializedIndex &&
1868           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1869          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1870         "Invalid chunk merge combination.");
1871 
1872   const size_t target_chunk_word_size =
1873     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1874 
1875   // [ prospective merge region )
1876   MetaWord* const p_merge_region_start =
1877     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1878   MetaWord* const p_merge_region_end =
1879     p_merge_region_start + target_chunk_word_size;
1880 
1881   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1882   VirtualSpaceNode* const vsn = chunk->container();
1883   OccupancyMap* const ocmap = vsn->occupancy_map();
1884 
1885   // The prospective chunk merge range must be completely contained by the
1886   // committed range of the virtual space node.
1887   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1888     return false;
1889   }
1890 
1891   // Only attempt to merge this range if at its start a chunk starts and at its end
1892   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1893   // of that range, we cannot merge.
1894   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1895     return false;
1896   }
1897   if (p_merge_region_end < vsn->top() &&
1898       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1899     return false;
1900   }
1901 
1902   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1903   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1904     return false;
1905   }
1906 
1907   // Success! Remove all chunks in this region...
1908   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1909     (is_class() ? "class space" : "metaspace"),
1910     p_merge_region_start, p_merge_region_end);
1911 
1912   const int num_chunks_removed =
1913     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1914 
1915   // ... and create a single new bigger chunk.
1916   Metachunk* const p_new_chunk =
1917       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1918   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1919   p_new_chunk->set_origin(origin_merge);
1920 
1921   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1922     (is_class() ? "class space" : "metaspace"),
1923     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1924 
1925   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1926   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1927   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1928 
1929   // Mark chunk as free. Note: it is not necessary to update the in-use
1930   // information in the occupancy map, because the old chunks were also free,
1931   // so nothing should have changed.
1932   p_new_chunk->set_is_tagged_free(true);
1933 
1934   // Add new chunk to its freelist.
1935   ChunkList* const list = free_chunks(target_chunk_type);
1936   list->return_chunk_at_head(p_new_chunk);
1937 
1938   // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1939   // should not have changed, because the size of the space should be the same)
1940   _free_chunks_count -= num_chunks_removed;
1941   _free_chunks_count ++;
1942 
1943   // VirtualSpaceNode::container_count does not have to be modified:
1944   // it means "number of active (non-free) chunks", so merging free chunks
1945   // should not affect that count.
1946 
1947   // At the end of a chunk merge, run verification tests.
1948   if (VerifyMetaspace) {
1949     DEBUG_ONLY(this->locked_verify());
1950     DEBUG_ONLY(vsn->verify());
1951   }
1952 
1953   return true;
1954 }
1955 
1956 // Remove all chunks in the given area - the chunks are supposed to be free -
1957 // from their corresponding freelists. Mark them as invalid.
1958 // - This does not correct the occupancy map.
1959 // - This does not adjust the counters in ChunkManager.
1960 // - This does not adjust the container count of the containing VirtualSpaceNode.
1961 // Returns the number of chunks removed.
1962 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1963   assert(p != NULL && word_size > 0, "Invalid range.");
1964   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1965   assert_is_aligned(word_size, smallest_chunk_size);
1966 
1967   Metachunk* const start = (Metachunk*) p;
1968   const Metachunk* const end = (Metachunk*)(p + word_size);
1969   Metachunk* cur = start;
1970   int num_removed = 0;
1971   while (cur < end) {
1972     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1973     DEBUG_ONLY(do_verify_chunk(cur));
1974     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1975     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1976     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1977       (is_class() ? "class space" : "metaspace"),
1978       cur, cur->word_size() * sizeof(MetaWord));
1979     cur->remove_sentinel();
1980     // Note: cannot call ChunkManager::remove_chunk, because that
1981     // modifies the counters in ChunkManager, which we do not want. So
1982     // we call remove_chunk on the freelist directly (see also the
1983     // splitting function which does the same).
1984     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1985     list->remove_chunk(cur);
1986     num_removed ++;
1987     cur = next;
1988   }
1989   return num_removed;
1990 }
1991 
1992 // Walk the list of VirtualSpaceNodes and delete
1993 // nodes with a 0 container_count.  Remove Metachunks in
1994 // the node from their respective freelists.
1995 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1996   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1997   assert_lock_strong(SpaceManager::expand_lock());
1998   // Don't use a VirtualSpaceListIterator because this
1999   // list is being changed and a straightforward use of an iterator is not safe.
2000   VirtualSpaceNode* purged_vsl = NULL;
2001   VirtualSpaceNode* prev_vsl = virtual_space_list();
2002   VirtualSpaceNode* next_vsl = prev_vsl;
2003   while (next_vsl != NULL) {
2004     VirtualSpaceNode* vsl = next_vsl;
2005     DEBUG_ONLY(vsl->verify_container_count();)
2006     next_vsl = vsl->next();
2007     // Don't free the current virtual space since it will likely
2008     // be needed soon.
2009     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2010       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2011                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2012       // Unlink it from the list
2013       if (prev_vsl == vsl) {
2014         // This is the case of the current node being the first node.
2015         assert(vsl == virtual_space_list(), "Expected to be the first node");
2016         set_virtual_space_list(vsl->next());
2017       } else {
2018         prev_vsl->set_next(vsl->next());
2019       }
2020 
2021       vsl->purge(chunk_manager);
2022       dec_reserved_words(vsl->reserved_words());
2023       dec_committed_words(vsl->committed_words());
2024       dec_virtual_space_count();
2025       purged_vsl = vsl;
2026       delete vsl;
2027     } else {
2028       prev_vsl = vsl;
2029     }
2030   }
2031 #ifdef ASSERT
2032   if (purged_vsl != NULL) {
2033     // List should be stable enough to use an iterator here.
2034     VirtualSpaceListIterator iter(virtual_space_list());
2035     while (iter.repeat()) {
2036       VirtualSpaceNode* vsl = iter.get_next();
2037       assert(vsl != purged_vsl, "Purge of vsl failed");
2038     }
2039   }
2040 #endif
2041 }
2042 
2043 
2044 // This function looks at the mmap regions in the metaspace without locking.
2045 // The chunks are added with store ordering and not deleted except at
2046 // unloading time during a safepoint.
2047 bool VirtualSpaceList::contains(const void* ptr) {
2048   // List should be stable enough to use an iterator here because removing virtual
2049   // space nodes is only allowed at a safepoint.
2050   VirtualSpaceListIterator iter(virtual_space_list());
2051   while (iter.repeat()) {
2052     VirtualSpaceNode* vsn = iter.get_next();
2053     if (vsn->contains(ptr)) {
2054       return true;
2055     }
2056   }
2057   return false;
2058 }
2059 
2060 void VirtualSpaceList::retire_current_virtual_space() {
2061   assert_lock_strong(SpaceManager::expand_lock());
2062 
2063   VirtualSpaceNode* vsn = current_virtual_space();
2064 
2065   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2066                                   Metaspace::chunk_manager_metadata();
2067 
2068   vsn->retire(cm);
2069 }
2070 
2071 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2072   DEBUG_ONLY(verify_container_count();)
2073   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2074   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2075     ChunkIndex index = (ChunkIndex)i;
2076     size_t chunk_size = chunk_manager->size_by_index(index);
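         // Carve as many chunks of this size as still fit into the remaining committed
         // space, then fall through to the next smaller chunk size.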
2077 
2078     while (free_words_in_vs() >= chunk_size) {
2079       Metachunk* chunk = get_chunk_vs(chunk_size);
2080       // Chunk will be allocated aligned, so allocation may require
2081       // additional padding chunks. That may cause the above allocation to
2082       // fail. Just ignore the failed allocation and continue with the
2083       // next smaller chunk size. As the VirtualSpaceNode committed
2084       // size should be a multiple of the smallest chunk size, we
2085       // should always be able to fill the VirtualSpace completely.
2086       if (chunk == NULL) {
2087         break;
2088       }
2089       chunk_manager->return_single_chunk(index, chunk);
2090     }
2091     DEBUG_ONLY(verify_container_count();)
2092   }
2093   assert(free_words_in_vs() == 0, "should be empty now");
2094 }
2095 
2096 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2097                                    _is_class(false),
2098                                    _virtual_space_list(NULL),
2099                                    _current_virtual_space(NULL),
2100                                    _reserved_words(0),
2101                                    _committed_words(0),
2102                                    _virtual_space_count(0) {
2103   MutexLockerEx cl(SpaceManager::expand_lock(),
2104                    Mutex::_no_safepoint_check_flag);
2105   create_new_virtual_space(word_size);
2106 }
2107 
2108 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2109                                    _is_class(true),
2110                                    _virtual_space_list(NULL),
2111                                    _current_virtual_space(NULL),
2112                                    _reserved_words(0),
2113                                    _committed_words(0),
2114                                    _virtual_space_count(0) {
2115   MutexLockerEx cl(SpaceManager::expand_lock(),
2116                    Mutex::_no_safepoint_check_flag);
2117   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2118   bool succeeded = class_entry->initialize();
2119   if (succeeded) {
2120     link_vs(class_entry);
2121   }
2122 }
2123 
2124 size_t VirtualSpaceList::free_bytes() {
2125   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2126 }
2127 
2128 // Allocate another meta virtual space and add it to the list.
2129 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2130   assert_lock_strong(SpaceManager::expand_lock());
2131 
2132   if (is_class()) {
2133     assert(false, "We currently don't support more than one VirtualSpace for"
2134                   " the compressed class space. The initialization of the"
2135                   " CCS uses another code path and should not hit this path.");
2136     return false;
2137   }
2138 
2139   if (vs_word_size == 0) {
2140     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2141     return false;
2142   }
2143 
2144   // Reserve the space
2145   size_t vs_byte_size = vs_word_size * BytesPerWord;
2146   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2147 
2148   // Allocate the meta virtual space and initialize it.
2149   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2150   if (!new_entry->initialize()) {
2151     delete new_entry;
2152     return false;
2153   } else {
2154     assert(new_entry->reserved_words() == vs_word_size,
2155         "Reserved memory size differs from requested memory size");
2156     // ensure lock-free iteration sees fully initialized node
2157     OrderAccess::storestore();
2158     link_vs(new_entry);
2159     return true;
2160   }
2161 }
2162 
2163 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2164   if (virtual_space_list() == NULL) {
2165       set_virtual_space_list(new_entry);
2166   } else {
2167     current_virtual_space()->set_next(new_entry);
2168   }
2169   set_current_virtual_space(new_entry);
2170   inc_reserved_words(new_entry->reserved_words());
2171   inc_committed_words(new_entry->committed_words());
2172   inc_virtual_space_count();
2173 #ifdef ASSERT
2174   new_entry->mangle();
2175 #endif
2176   LogTarget(Trace, gc, metaspace) lt;
2177   if (lt.is_enabled()) {
2178     LogStream ls(lt);
2179     VirtualSpaceNode* vsl = current_virtual_space();
2180     ResourceMark rm;
2181     vsl->print_on(&ls);
2182   }
2183 }
2184 
2185 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2186                                       size_t min_words,
2187                                       size_t preferred_words) {
2188   size_t before = node->committed_words();
2189 
2190   bool result = node->expand_by(min_words, preferred_words);
2191 
2192   size_t after = node->committed_words();
2193 
2194   // after and before can be the same if the memory was pre-committed.
2195   assert(after >= before, "Inconsistency");
2196   inc_committed_words(after - before);
2197 
2198   return result;
2199 }
2200 
2201 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2202   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2203   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2204   assert(min_words <= preferred_words, "Invalid arguments");
2205 
2206   const char* const class_or_not = (is_class() ? "class" : "non-class");
2207 
2208   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2209     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2210               class_or_not);
2211     return false;
2212   }
2213 
2214   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2215   if (allowed_expansion_words < min_words) {
2216     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2217               class_or_not);
2218     return false;
2219   }
2220 
2221   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2222 
2223   // Commit more memory from the current virtual space.
2224   bool vs_expanded = expand_node_by(current_virtual_space(),
2225                                     min_words,
2226                                     max_expansion_words);
2227   if (vs_expanded) {
2228      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2229                class_or_not);
2230      return true;
2231   }
2232   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2233             class_or_not);
2234   retire_current_virtual_space();
2235 
2236   // Get another virtual space.
2237   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2238   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2239 
2240   if (create_new_virtual_space(grow_vs_words)) {
2241     if (current_virtual_space()->is_pre_committed()) {
2242       // The memory was pre-committed, so we are done here.
2243       assert(min_words <= current_virtual_space()->committed_words(),
2244           "The new VirtualSpace was pre-committed, so it"
2245           " should be large enough to fit the alloc request.");
2246       return true;
2247     }
2248 
2249     return expand_node_by(current_virtual_space(),
2250                           min_words,
2251                           max_expansion_words);
2252   }
2253 
2254   return false;
2255 }
2256 
2257 // Given a chunk, calculate the largest possible padding space which
2258 // could be required when allocating it.
2259 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2260   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2261   if (chunk_type != HumongousIndex) {
2262     // Normal, non-humongous chunks are allocated at chunk size
2263     // boundaries, so the largest padding space required would be that
2264     // minus the smallest chunk size.
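         // (top() of a node is always at least aligned to the smallest chunk size, so the
         // worst-case gap to the next chunk-size boundary is the chunk size minus one
         // smallest chunk.)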
2265     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2266     return chunk_word_size - smallest_chunk_size;
2267   } else {
2268     // Humongous chunks are allocated at smallest-chunksize
2269     // boundaries, so there is no padding required.
2270     return 0;
2271   }
2272 }
2273 
2274 
2275 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2276 
2277   // Allocate a chunk out of the current virtual space.
2278   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2279 
2280   if (next != NULL) {
2281     return next;
2282   }
2283 
2284   // The expand amount is currently only determined by the requested sizes
2285   // and not how much committed memory is left in the current virtual space.
2286 
2287   // We must have enough space for the requested size and any
2288   // additional required padding chunks.
2289   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2290 
2291   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2292   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2293   if (min_word_size >= preferred_word_size) {
2294     // Can happen when humongous chunks are allocated.
2295     preferred_word_size = min_word_size;
2296   }
2297 
2298   bool expanded = expand_by(min_word_size, preferred_word_size);
2299   if (expanded) {
2300     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2301     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2302   }
2303 
2304   return next;
2305 }
2306 
2307 void VirtualSpaceList::print_on(outputStream* st) const {
2308   VirtualSpaceListIterator iter(virtual_space_list());
2309   while (iter.repeat()) {
2310     VirtualSpaceNode* node = iter.get_next();
2311     node->print_on(st);
2312   }
2313 }
2314 
2315 void VirtualSpaceList::print_map(outputStream* st) const {
2316   VirtualSpaceNode* list = virtual_space_list();
2317   VirtualSpaceListIterator iter(list);
2318   unsigned i = 0;
2319   while (iter.repeat()) {
2320     st->print_cr("Node %u:", i);
2321     VirtualSpaceNode* node = iter.get_next();
2322     node->print_map(st, this->is_class());
2323     i ++;
2324   }
2325 }
2326 
2327 // MetaspaceGC methods
2328 
2329 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2330 // Within the VM operation after the GC the attempt to allocate the metadata
2331 // should succeed.  If the GC did not free enough space for the metaspace
2332 // allocation, the HWM is increased so that another virtualspace will be
2333 // allocated for the metadata.  With perm gen the increase in the perm
2334 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2335 // metaspace policy uses those as the small and large steps for the HWM.
2336 //
2337 // After the GC the compute_new_size() for MetaspaceGC is called to
2338 // resize the capacity of the metaspaces.  The current implementation
2339 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2340 // to resize the Java heap by some GC's.  New flags can be implemented
2341 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2342 // free space is desirable in the metaspace capacity to decide how much
2343 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2344 // free space is desirable in the metaspace capacity before decreasing
2345 // the HWM.
2346 
2347 // Calculate the amount to increase the high water mark (HWM).
2348 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2349 // another expansion is not requested too soon.  If that is not
2350 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2351 // If that is still not enough, expand by the size of the allocation
2352 // plus some.
2353 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2354   size_t min_delta = MinMetaspaceExpansion;
2355   size_t max_delta = MaxMetaspaceExpansion;
2356   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2357 
2358   if (delta <= min_delta) {
2359     delta = min_delta;
2360   } else if (delta <= max_delta) {
2361     // Don't want to hit the high water mark on the next
2362     // allocation so make the delta greater than just enough
2363     // for this allocation.
2364     delta = max_delta;
2365   } else {
2366     // This allocation is large but the next ones are probably not
2367     // so increase by the minimum.
2368     delta = delta + min_delta;
2369   }
2370 
2371   assert_is_aligned(delta, Metaspace::commit_alignment());
2372 
2373   return delta;
2374 }
2375 
2376 size_t MetaspaceGC::capacity_until_GC() {
2377   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2378   assert(value >= MetaspaceSize, "Not initialized properly?");
2379   return value;
2380 }
2381 
2382 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2383   assert_is_aligned(v, Metaspace::commit_alignment());
2384 
2385   intptr_t capacity_until_GC = _capacity_until_GC;
2386   intptr_t new_value = capacity_until_GC + v;
2387 
2388   if (new_value < capacity_until_GC) {
2389     // The addition wrapped around, set new_value to aligned max value.
2390     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2391   }
2392 
2393   intptr_t expected = _capacity_until_GC;
2394   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2395 
2396   if (expected != actual) {
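         // Another thread changed _capacity_until_GC concurrently; report failure so the
         // caller can retry with a fresh value.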
2397     return false;
2398   }
2399 
2400   if (new_cap_until_GC != NULL) {
2401     *new_cap_until_GC = new_value;
2402   }
2403   if (old_cap_until_GC != NULL) {
2404     *old_cap_until_GC = capacity_until_GC;
2405   }
2406   return true;
2407 }
2408 
2409 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2410   assert_is_aligned(v, Metaspace::commit_alignment());
2411 
2412   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2413 }
2414 
2415 void MetaspaceGC::initialize() {
2416   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2417   // we can't do a GC during initialization.
2418   _capacity_until_GC = MaxMetaspaceSize;
2419 }
2420 
2421 void MetaspaceGC::post_initialize() {
2422   // Reset the high-water mark once the VM initialization is done.
2423   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2424 }
2425 
2426 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2427   // Check if the compressed class space is full.
2428   if (is_class && Metaspace::using_class_space()) {
2429     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2430     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2431       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2432                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2433       return false;
2434     }
2435   }
2436 
2437   // Check if the user has imposed a limit on the metaspace memory.
2438   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2439   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2440     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2441               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2442     return false;
2443   }
2444 
2445   return true;
2446 }
2447 
2448 size_t MetaspaceGC::allowed_expansion() {
2449   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2450   size_t capacity_until_gc = capacity_until_GC();
2451 
2452   assert(capacity_until_gc >= committed_bytes,
2453          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2454          capacity_until_gc, committed_bytes);
2455 
2456   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2457   size_t left_until_GC = capacity_until_gc - committed_bytes;
2458   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2459   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2460             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
2461             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2462 
2463   return left_to_commit / BytesPerWord;
2464 }
2465 
2466 void MetaspaceGC::compute_new_size() {
2467   assert(_shrink_factor <= 100, "invalid shrink factor");
2468   uint current_shrink_factor = _shrink_factor;
2469   _shrink_factor = 0;
2470 
2471   // Using committed_bytes() for used_after_gc is an overestimation, since the
2472   // chunk free lists are included in committed_bytes() and the memory in an
2473   // un-fragmented chunk free list is available for future allocations.
2474   // However, if the chunk free lists becomes fragmented, then the memory may
2475   // not be available for future allocations and the memory is therefore "in use".
2476   // Including the chunk free lists in the definition of "in use" is therefore
2477   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2478   // shrink below committed_bytes() and this has caused serious bugs in the past.
2479   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2480   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2481 
2482   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2483   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
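       // The smallest capacity at which at least MinMetaspaceFreeRatio percent would be
       // free: used_after_gc / (1 - MinMetaspaceFreeRatio / 100).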
2484 
2485   const double min_tmp = used_after_gc / maximum_used_percentage;
2486   size_t minimum_desired_capacity =
2487     (size_t)MIN2(min_tmp, double(max_uintx));
2488   // Don't shrink less than the initial generation size
2489   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2490                                   MetaspaceSize);
2491 
2492   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2493   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2494                            minimum_free_percentage, maximum_used_percentage);
2495   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2496 
2497 
2498   size_t shrink_bytes = 0;
2499   if (capacity_until_GC < minimum_desired_capacity) {
2500     // If we have less capacity below the metaspace HWM, then
2501     // increment the HWM.
2502     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2503     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2504     // Don't expand unless it's significant
2505     if (expand_bytes >= MinMetaspaceExpansion) {
2506       size_t new_capacity_until_GC = 0;
2507       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2508       assert(succeeded, "Should always successfully increment HWM when at safepoint");
2509 
2510       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2511                                                new_capacity_until_GC,
2512                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2513       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2514                                minimum_desired_capacity / (double) K,
2515                                expand_bytes / (double) K,
2516                                MinMetaspaceExpansion / (double) K,
2517                                new_capacity_until_GC / (double) K);
2518     }
2519     return;
2520   }
2521 
2522   // No expansion, now see if we want to shrink
2523   // We would never want to shrink more than this
2524   assert(capacity_until_GC >= minimum_desired_capacity,
2525          SIZE_FORMAT " >= " SIZE_FORMAT,
2526          capacity_until_GC, minimum_desired_capacity);
2527   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2528 
2529   // Should shrinking be considered?
2530   if (MaxMetaspaceFreeRatio < 100) {
2531     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2532     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
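         // The largest capacity at which no more than MaxMetaspaceFreeRatio percent
         // would be free: used_after_gc / (1 - MaxMetaspaceFreeRatio / 100).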
2533     const double max_tmp = used_after_gc / minimum_used_percentage;
2534     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2535     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2536                                     MetaspaceSize);
2537     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2538                              maximum_free_percentage, minimum_used_percentage);
2539     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2540                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2541 
2542     assert(minimum_desired_capacity <= maximum_desired_capacity,
2543            "sanity check");
2544 
2545     if (capacity_until_GC > maximum_desired_capacity) {
2546       // Capacity too large, compute shrinking size
2547       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
2548       // We don't want to shrink all the way back to initSize if people call
2549       // System.gc(), because some programs do that between "phases" and then
2550       // we'd just have to grow the heap up again for the next phase.  So we
2551       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2552       // on the third call, and 100% by the fourth call.  But if we recompute
2553       // size without shrinking, it goes back to 0%.
2554       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2555 
2556       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2557 
2558       assert(shrink_bytes <= max_shrink_bytes,
2559              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2560              shrink_bytes, max_shrink_bytes);
2561       if (current_shrink_factor == 0) {
2562         _shrink_factor = 10;
2563       } else {
2564         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2565       }
2566       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2567                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2568       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2569                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2570     }
2571   }
2572 
2573   // Don't shrink unless it's significant
2574   if (shrink_bytes >= MinMetaspaceExpansion &&
2575       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2576     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2577     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2578                                              new_capacity_until_GC,
2579                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2580   }
2581 }
2582 
2583 // Metadebug methods
2584 
2585 void Metadebug::init_allocation_fail_alot_count() {
2586   if (MetadataAllocationFailALot) {
2587     _allocation_fail_alot_count =
2588       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2589   }
2590 }
2591 
2592 #ifdef ASSERT
2593 bool Metadebug::test_metadata_failure() {
2594   if (MetadataAllocationFailALot &&
2595       Threads::is_vm_complete()) {
2596     if (_allocation_fail_alot_count > 0) {
2597       _allocation_fail_alot_count--;
2598     } else {
2599       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2600       init_allocation_fail_alot_count();
2601       return true;
2602     }
2603   }
2604   return false;
2605 }
2606 #endif
2607 
2608 // ChunkManager methods
2609 size_t ChunkManager::free_chunks_total_words() {
2610   return _free_chunks_total;
2611 }
2612 
2613 size_t ChunkManager::free_chunks_total_bytes() {
2614   return free_chunks_total_words() * BytesPerWord;
2615 }
2616 
2617 // Update internal accounting after a chunk was added
2618 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2619   assert_lock_strong(SpaceManager::expand_lock());
2620   _free_chunks_count ++;
2621   _free_chunks_total += c->word_size();
2622 }
2623 
2624 // Update internal accounting after a chunk was removed
2625 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2626   assert_lock_strong(SpaceManager::expand_lock());
2627   assert(_free_chunks_count >= 1,
2628     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2629   assert(_free_chunks_total >= c->word_size(),
2630     "ChunkManager::_free_chunks_total: about to go negative "
2631      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2632   _free_chunks_count --;
2633   _free_chunks_total -= c->word_size();
2634 }
2635 
2636 size_t ChunkManager::free_chunks_count() {
2637 #ifdef ASSERT
2638   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
2639     MutexLockerEx cl(SpaceManager::expand_lock(),
2640                      Mutex::_no_safepoint_check_flag);
2641     // This lock is only needed in debug because the verification
2642     // of the _free_chunks_totals walks the list of free chunks
2643     slow_locked_verify_free_chunks_count();
2644   }
2645 #endif
2646   return _free_chunks_count;
2647 }
2648 
2649 ChunkIndex ChunkManager::list_index(size_t size) {
2650   return get_chunk_type_by_size(size, is_class());
2651 }
2652 
2653 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2654   index_bounds_check(index);
2655   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2656   return get_size_for_nonhumongous_chunktype(index, is_class());
2657 }
2658 
2659 void ChunkManager::locked_verify_free_chunks_total() {
2660   assert_lock_strong(SpaceManager::expand_lock());
2661   assert(sum_free_chunks() == _free_chunks_total,
2662          "_free_chunks_total " SIZE_FORMAT " is not the"
2663          " same as sum " SIZE_FORMAT, _free_chunks_total,
2664          sum_free_chunks());
2665 }
2666 
2667 void ChunkManager::verify_free_chunks_total() {
2668   MutexLockerEx cl(SpaceManager::expand_lock(),
2669                      Mutex::_no_safepoint_check_flag);
2670   locked_verify_free_chunks_total();
2671 }
2672 
2673 void ChunkManager::locked_verify_free_chunks_count() {
2674   assert_lock_strong(SpaceManager::expand_lock());
2675   assert(sum_free_chunks_count() == _free_chunks_count,
2676          "_free_chunks_count " SIZE_FORMAT " is not the"
2677          " same as sum " SIZE_FORMAT, _free_chunks_count,
2678          sum_free_chunks_count());
2679 }
2680 
2681 void ChunkManager::verify_free_chunks_count() {
2682 #ifdef ASSERT
2683   MutexLockerEx cl(SpaceManager::expand_lock(),
2684                      Mutex::_no_safepoint_check_flag);
2685   locked_verify_free_chunks_count();
2686 #endif
2687 }
2688 
2689 void ChunkManager::verify() {
2690   MutexLockerEx cl(SpaceManager::expand_lock(),
2691                      Mutex::_no_safepoint_check_flag);
2692   locked_verify();
2693 }
2694 
2695 void ChunkManager::locked_verify() {
2696   locked_verify_free_chunks_count();
2697   locked_verify_free_chunks_total();
2698   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2699     ChunkList* list = free_chunks(i);
2700     if (list != NULL) {
2701       Metachunk* chunk = list->head();
2702       while (chunk) {
2703         DEBUG_ONLY(do_verify_chunk(chunk);)
2704         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2705         chunk = chunk->next();
2706       }
2707     }
2708   }
2709 }
2710 
2711 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2712   assert_lock_strong(SpaceManager::expand_lock());
2713   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2714                 _free_chunks_total, _free_chunks_count);
2715 }
2716 
2717 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2718   assert_lock_strong(SpaceManager::expand_lock());
2719   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2720                 sum_free_chunks(), sum_free_chunks_count());
2721 }
2722 
2723 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2724   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2725          "Bad index: %d", (int)index);
2726 
2727   return &_free_chunks[index];
2728 }
2729 
2730 // These methods sum the free chunk lists and are used by printing
2731 // routines that run in product builds.
2732 size_t ChunkManager::sum_free_chunks() {
2733   assert_lock_strong(SpaceManager::expand_lock());
2734   size_t result = 0;
2735   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2736     ChunkList* list = free_chunks(i);
2737 
2738     if (list == NULL) {
2739       continue;
2740     }
2741 
2742     result = result + list->count() * list->size();
2743   }
2744   result = result + humongous_dictionary()->total_size();
2745   return result;
2746 }
2747 
2748 size_t ChunkManager::sum_free_chunks_count() {
2749   assert_lock_strong(SpaceManager::expand_lock());
2750   size_t count = 0;
2751   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2752     ChunkList* list = free_chunks(i);
2753     if (list == NULL) {
2754       continue;
2755     }
2756     count = count + list->count();
2757   }
2758   count = count + humongous_dictionary()->total_free_blocks();
2759   return count;
2760 }
2761 
2762 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2763   ChunkIndex index = list_index(word_size);
2764   assert(index < HumongousIndex, "No humongous list");
2765   return free_chunks(index);
2766 }
2767 
2768 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2769 // split up the larger chunk into n smaller chunks, at least one of which should be
2770 // the target chunk of target chunk size. The smaller chunks, including the target
2771 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
2772 // Note that this chunk is supposed to be removed from the freelist right away.
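     // The resulting region layout is [target chunk][remainder chunk]...[remainder chunk],
     // where each remainder chunk is the largest chunk type whose alignment requirement
     // is satisfied at its start address.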
2773 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2774   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2775 
2776   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2777   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2778 
2779   MetaWord* const region_start = (MetaWord*)larger_chunk;
2780   const size_t region_word_len = larger_chunk->word_size();
2781   MetaWord* const region_end = region_start + region_word_len;
2782   VirtualSpaceNode* const vsn = larger_chunk->container();
2783   OccupancyMap* const ocmap = vsn->occupancy_map();
2784 
2785   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2786   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2787   // at an address suitable to place the smaller target chunk.
2788   assert_is_aligned(region_start, target_chunk_word_size);
2789 
2790   // Remove old chunk.
2791   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2792   larger_chunk->remove_sentinel();
2793 
2794   // Prevent access to the old chunk from here on.
2795   larger_chunk = NULL;
2796   // ... and wipe it.
2797   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2798 
2799   // In its place create first the target chunk...
2800   MetaWord* p = region_start;
2801   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2802   assert(target_chunk == (Metachunk*)p, "Sanity");
2803   target_chunk->set_origin(origin_split);
2804 
2805   // Note: we do not need to mark its start in the occupancy map
2806   // because it coincides with the old chunk start.
2807 
2808   // Mark chunk as free and return to the freelist.
2809   do_update_in_use_info_for_chunk(target_chunk, false);
2810   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2811 
2812   // This chunk should now be valid and can be verified.
2813   DEBUG_ONLY(do_verify_chunk(target_chunk));
2814 
2815   // In the remaining space create the remainder chunks.
2816   p += target_chunk->word_size();
2817   assert(p < region_end, "Sanity");
2818 
2819   while (p < region_end) {
2820 
2821     // Find the largest chunk size which fits the alignment requirements at address p.
2822     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2823     size_t this_chunk_word_size = 0;
2824     for(;;) {
2825       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2826       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2827         break;
2828       } else {
2829         this_chunk_index = prev_chunk_index(this_chunk_index);
2830         assert(this_chunk_index >= target_chunk_index, "Sanity");
2831       }
2832     }
2833 
2834     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2835     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2836     assert(p + this_chunk_word_size <= region_end, "Sanity");
2837 
2838     // Create splitting chunk.
2839     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2840     assert(this_chunk == (Metachunk*)p, "Sanity");
2841     this_chunk->set_origin(origin_split);
2842     ocmap->set_chunk_starts_at_address(p, true);
2843     do_update_in_use_info_for_chunk(this_chunk, false);
2844 
2845     // This chunk should be valid and can be verified.
2846     DEBUG_ONLY(do_verify_chunk(this_chunk));
2847 
2848     // Return this chunk to freelist and correct counter.
2849     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2850     _free_chunks_count ++;
2851 
2852     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2853       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2854       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2855       p2i(region_start), p2i(region_end));
2856 
2857     p += this_chunk_word_size;
2858 
2859   }
2860 
2861   return target_chunk;
2862 }
2863 
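     // Removes and returns a free chunk of at least word_size words, either from the
     // matching freelist or, for humongous requests, from the dictionary. If no suitable
     // non-humongous chunk is free, a larger free chunk is split up. Returns NULL if
     // nothing suitable is available.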
2864 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2865   assert_lock_strong(SpaceManager::expand_lock());
2866 
2867   slow_locked_verify();
2868 
2869   Metachunk* chunk = NULL;
2870   bool we_did_split_a_chunk = false;
2871 
2872   if (list_index(word_size) != HumongousIndex) {
2873 
2874     ChunkList* free_list = find_free_chunks_list(word_size);
2875     assert(free_list != NULL, "Sanity check");
2876 
2877     chunk = free_list->head();
2878 
2879     if (chunk == NULL) {
2880       // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
2881       // This is the counterpart of the coalescing-upon-chunk-return.
2882 
2883       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2884 
2885       // Is there a larger chunk we could split?
2886       Metachunk* larger_chunk = NULL;
2887       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2888       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2889         larger_chunk = free_chunks(larger_chunk_index)->head();
2890         if (larger_chunk == NULL) {
2891           larger_chunk_index = next_chunk_index(larger_chunk_index);
2892         }
2893       }
2894 
2895       if (larger_chunk != NULL) {
2896         assert(larger_chunk->word_size() > word_size, "Sanity");
2897         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2898 
2899         // We found a larger chunk. Let's split it up:
2900         // - remove old chunk
2901         // - in its place, create new smaller chunks, with at least one chunk
2902         //   being of target size, the others sized as large as possible. This
2903         //   is to make sure the resulting chunks are "as coalesced as possible"
2904         //   (similar to VirtualSpaceNode::retire()).
2905         // Note: during this operation both ChunkManager and VirtualSpaceNode
2906         //  are temporarily invalid, so be careful with asserts.
2907 
2908         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2909            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2910           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2911           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2912 
2913         chunk = split_chunk(word_size, larger_chunk);
2914 
2915         // This should have worked.
2916         assert(chunk != NULL, "Sanity");
2917         assert(chunk->word_size() == word_size, "Sanity");
2918         assert(chunk->is_tagged_free(), "Sanity");
2919 
2920         we_did_split_a_chunk = true;
2921 
2922       }
2923     }
2924 
2925     if (chunk == NULL) {
2926       return NULL;
2927     }
2928 
2929     // Remove the chunk as the head of the list.
2930     free_list->remove_chunk(chunk);
2931 
2932     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2933                                        p2i(free_list), free_list->count());
2934 
2935   } else {
2936     chunk = humongous_dictionary()->get_chunk(word_size);
2937 
2938     if (chunk == NULL) {
2939       return NULL;
2940     }
2941 
2942     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2943                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2944   }
2945 
2946   // Chunk has been removed from the chunk manager; update counters.
2947   account_for_removed_chunk(chunk);
2948   do_update_in_use_info_for_chunk(chunk, true);
2949   chunk->container()->inc_container_count();
2950   chunk->inc_use_count();
2951 
2952   // Remove it from the links to this freelist
2953   chunk->set_next(NULL);
2954   chunk->set_prev(NULL);
2955 
2956   // Run some verifications (some more if we did a chunk split)
2957 #ifdef ASSERT
2958   if (VerifyMetaspace) {
2959     locked_verify();
2960     VirtualSpaceNode* const vsn = chunk->container();
2961     vsn->verify();
2962     if (we_did_split_a_chunk) {
2963       vsn->verify_free_chunks_are_ideally_merged();
2964     }
2965   }
2966 #endif
2967 
2968   return chunk;
2969 }
2970 
2971 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2972   assert_lock_strong(SpaceManager::expand_lock());
2973   slow_locked_verify();
2974 
2975   // Take from the beginning of the list
2976   Metachunk* chunk = free_chunks_get(word_size);
2977   if (chunk == NULL) {
2978     return NULL;
2979   }
2980 
2981   assert((word_size <= chunk->word_size()) ||
2982          (list_index(chunk->word_size()) == HumongousIndex),
2983          "Non-humongous variable sized chunk");
2984   LogTarget(Debug, gc, metaspace, freelist) lt;
2985   if (lt.is_enabled()) {
2986     size_t list_count;
2987     if (list_index(word_size) < HumongousIndex) {
2988       ChunkList* list = find_free_chunks_list(word_size);
2989       list_count = list->count();
2990     } else {
2991       list_count = humongous_dictionary()->total_count();
2992     }
2993     LogStream ls(lt);
2994     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
2995              p2i(this), p2i(chunk), chunk->word_size(), list_count);
2996     ResourceMark rm;
2997     locked_print_free_chunks(&ls);
2998   }
2999 
3000   return chunk;
3001 }
3002 
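     // Takes a single chunk that is currently in use and returns it to this ChunkManager.
     // Non-humongous chunks go back onto the freelist of their type; humongous chunks go
     // back into the dictionary. Afterwards an attempt is made to coalesce the chunk with
     // neighboring free chunks into a larger chunk type.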
3003 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3004   assert_lock_strong(SpaceManager::expand_lock());
3005   assert(chunk != NULL, "Expected chunk.");
3006   DEBUG_ONLY(do_verify_chunk(chunk);)
3007   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3008   assert(chunk->container() != NULL, "Container should have been set.");
3009   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3010   index_bounds_check(index);
3011 
3012   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3013   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3014   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3015   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3016 
3017   if (index != HumongousIndex) {
3018     // Return non-humongous chunk to freelist.
3019     ChunkList* list = free_chunks(index);
3020     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3021     list->return_chunk_at_head(chunk);
3022     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3023         chunk_size_name(index), p2i(chunk));
3024   } else {
3025     // Return humongous chunk to dictionary.
3026     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3027     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3028            "Humongous chunk has wrong alignment.");
3029     _humongous_dictionary.return_chunk(chunk);
3030     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3031         chunk_size_name(index), p2i(chunk), chunk->word_size());
3032   }
3033   chunk->container()->dec_container_count();
3034   do_update_in_use_info_for_chunk(chunk, false);
3035 
3036   // Chunk has been added; update counters.
3037   account_for_added_chunk(chunk);
3038 
3039   // Attempt to coalesce the returned chunk with its neighboring chunks:
3040   // if this chunk is small or specialized, attempt to coalesce it into a medium chunk.
3041   if (index == SmallIndex || index == SpecializedIndex) {
3042     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3043       // That did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3044       if (index == SpecializedIndex) {
3045         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3046           // give up.
3047         }
3048       }
3049     }
3050   }
3051 
3052 }
3053 
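     // Returns a NULL-terminated list of chunks of the given type to this ChunkManager,
     // handing each element to return_single_chunk().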
3054 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3055   index_bounds_check(index);
3056   if (chunks == NULL) {
3057     return;
3058   }
3059   LogTarget(Trace, gc, metaspace, freelist) log;
3060   if (log.is_enabled()) { // tracing
3061     log.print("returning list of %s chunks...", chunk_size_name(index));
3062   }
3063   unsigned num_chunks_returned = 0;
3064   size_t size_chunks_returned = 0;
3065   Metachunk* cur = chunks;
3066   while (cur != NULL) {
3067     // Capture the next link before it is changed
3068     // by the call to return_chunk_at_head();
3069     Metachunk* next = cur->next();
3070     if (log.is_enabled()) { // tracing
3071       num_chunks_returned ++;
3072       size_chunks_returned += cur->word_size();
3073     }
3074     return_single_chunk(index, cur);
3075     cur = next;
3076   }
3077   if (log.is_enabled()) { // tracing
3078     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3079         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3080     if (index != HumongousIndex) {
3081       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3082     } else {
3083       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3084     }
3085   }
3086 }
3087 
3088 void ChunkManager::print_on(outputStream* out) const {
3089   _humongous_dictionary.report_statistics(out);
3090 }
3091 
3092 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3093   assert_lock_strong(SpaceManager::expand_lock());
3094   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3095     stat->num_by_type[i] = num_free_chunks(i);
3096     stat->single_size_by_type[i] = size_by_index(i);
3097     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3098   }
3099   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3100   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3101 }
3102 
3103 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3104   MutexLockerEx cl(SpaceManager::expand_lock(),
3105                    Mutex::_no_safepoint_check_flag);
3106   locked_get_statistics(stat);
3107 }
3108 
3109 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3110   size_t total = 0;
3111   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3112 
3113   const char* unit = scale_unit(scale);
3114   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3115     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3116                    stat->num_by_type[i], chunk_size_name(i),
3117                    stat->single_size_by_type[i]);
3118     if (scale == 1) {
3119       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3120     } else {
3121       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3122     }
3123 
3124     total += stat->total_size_by_type[i];
3125   }
3126 
3127 
3128   total += stat->total_size_humongous_chunks;
3129 
3130   if (scale == 1) {
3131     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3132     stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3133 
3134     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3135   } else {
3136     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3137     stat->num_humongous_chunks,
3138     (float)stat->total_size_humongous_chunks / scale, unit);
3139 
3140     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3141   }
3142 
3143 }
3144 
3145 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3146   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3147 
3148   // Note: hold the lock only while retrieving the statistics; do the
3149   // printing outside of the lock.
3150   ChunkManagerStatistics stat;
3151   out->print_cr("Chunkmanager (non-class):");
3152   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3153   if (non_class_cm != NULL) {
3154     non_class_cm->get_statistics(&stat);
3155     ChunkManager::print_statistics(&stat, out, scale);
3156   } else {
3157     out->print_cr("unavailable.");
3158   }
3159   out->print_cr("Chunkmanager (class):");
3160   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3161   if (class_cm != NULL) {
3162     class_cm->get_statistics(&stat);
3163     ChunkManager::print_statistics(&stat, out, scale);
3164   } else {
3165     out->print_cr("unavailable.");
3166   }
3167 }
3168 
3169 // SpaceManager methods
3170 
3171 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3172   size_t chunk_sizes[] = {
3173       specialized_chunk_size(is_class_space),
3174       small_chunk_size(is_class_space),
3175       medium_chunk_size(is_class_space)
3176   };
3177 
3178   // Adjust up to one of the fixed chunk sizes ...
3179   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3180     if (requested <= chunk_sizes[i]) {
3181       return chunk_sizes[i];
3182     }
3183   }
3184 
3185   // ... or return the size as a humongous chunk.
3186   return requested;
3187 }
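     // E.g. a request no larger than the specialized chunk size is rounded up to exactly
     // that size, while a request larger than a medium chunk is returned unchanged and
     // will later be treated as a humongous chunk.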
3188 
3189 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3190   return adjust_initial_chunk_size(requested, is_class());
3191 }
3192 
3193 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3194   size_t requested;
3195 
3196   if (is_class()) {
3197     switch (type) {
3198     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3199     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3200     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3201     default:                                 requested = ClassSmallChunk; break;
3202     }
3203   } else {
3204     switch (type) {
3205     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3206     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3207     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3208     default:                                 requested = SmallChunk; break;
3209     }
3210   }
3211 
3212   // Adjust to one of the fixed chunk sizes (unless humongous)
3213   const size_t adjusted = adjust_initial_chunk_size(requested);
3214 
3215   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3216          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3217 
3218   return adjusted;
3219 }
3220 
3221 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3222   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3223   size_t free = 0;
3224   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3225     Metachunk* chunk = chunks_in_use(i);
3226     while (chunk != NULL) {
3227       free += chunk->free_word_size();
3228       chunk = chunk->next();
3229     }
3230   }
3231   return free;
3232 }
3233 
3234 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3235   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3236   size_t result = 0;
3237   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3238    result += sum_waste_in_chunks_in_use(i);
3239   }
3240 
3241   return result;
3242 }
3243 
3244 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3245   size_t result = 0;
3246   Metachunk* chunk = chunks_in_use(index);
3247   // Count the free space in all the chunks except the
3248   // current chunk, from which allocations are still being done.
3249   while (chunk != NULL) {
3250     if (chunk != current_chunk()) {
3251       result += chunk->free_word_size();
3252     }
3253     chunk = chunk->next();
3254   }
3255   return result;
3256 }
3257 
3258 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3259   // For CMS use "allocated_chunks_words()", which does not need the
3260   // Metaspace lock.  For the other collectors sum over the in-use
3261   // chunk lists.  Summing the lists is the definitive answer, but it
3262   // is too expensive to use in product builds, so in product
3263   // allocated_chunks_words() should be used.  Keeping both code paths
3264   // makes it possible to check that allocated_chunks_words() returns
3265   // the same value as sum_capacity_in_chunks_in_use(), the definitive
3266   // answer.
3267   if (UseConcMarkSweepGC) {
3268     return allocated_chunks_words();
3269   } else {
3270     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3271     size_t sum = 0;
3272     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3273       Metachunk* chunk = chunks_in_use(i);
3274       while (chunk != NULL) {
3275         sum += chunk->word_size();
3276         chunk = chunk->next();
3277       }
3278     }
3279     return sum;
3280   }
3281 }
3282 
3283 size_t SpaceManager::sum_count_in_chunks_in_use() {
3284   size_t count = 0;
3285   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3286     count = count + sum_count_in_chunks_in_use(i);
3287   }
3288 
3289   return count;
3290 }
3291 
3292 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3293   size_t count = 0;
3294   Metachunk* chunk = chunks_in_use(i);
3295   while (chunk != NULL) {
3296     count++;
3297     chunk = chunk->next();
3298   }
3299   return count;
3300 }
3301 
3302 
3303 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3304   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3305   size_t used = 0;
3306   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3307     Metachunk* chunk = chunks_in_use(i);
3308     while (chunk != NULL) {
3309       used += chunk->used_word_size();
3310       chunk = chunk->next();
3311     }
3312   }
3313   return used;
3314 }
3315 
3316 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3317 
3318   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3319     Metachunk* chunk = chunks_in_use(i);
3320     st->print("SpaceManager: %s " PTR_FORMAT,
3321                  chunk_size_name(i), p2i(chunk));
3322     if (chunk != NULL) {
3323       st->print_cr(" free " SIZE_FORMAT,
3324                    chunk->free_word_size());
3325     } else {
3326       st->cr();
3327     }
3328   }
3329 
3330   chunk_manager()->locked_print_free_chunks(st);
3331   chunk_manager()->locked_print_sum_free_chunks(st);
3332 }
3333 
3334 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3335 
3336   // Decide between a small chunk and a medium chunk.  Up to
3337   // _small_chunk_limit small chunks can be allocated.
3338   // After that a medium chunk is preferred.
3339   size_t chunk_word_size;
3340 
3341   // Special case for anonymous metadata space.
3342   // Anonymous metadata space is usually small, with the majority within the 1K - 2K
3343   // range and rarely around 4K (64-bit JVM).
3344   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
3345   // allocations in SpecializedChunk up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3346   // reduces space waste from 60+% to around 30%.
3347   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3348       _mdtype == Metaspace::NonClassType &&
3349       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3350       word_size + Metachunk::overhead() <= SpecializedChunk) {
3351     return SpecializedChunk;
3352   }
3353 
3354   if (chunks_in_use(MediumIndex) == NULL &&
3355       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3356     chunk_word_size = (size_t) small_chunk_size();
3357     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3358       chunk_word_size = medium_chunk_size();
3359     }
3360   } else {
3361     chunk_word_size = medium_chunk_size();
3362   }
3363 
3364   // Might still need a humongous chunk.  Enforce
3365   // humongous allocation sizes to be aligned up to
3366   // the smallest chunk size.
3367   size_t if_humongous_sized_chunk =
3368     align_up(word_size + Metachunk::overhead(),
3369                   smallest_chunk_size());
3370   chunk_word_size =
3371     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3372 
3373   assert(!SpaceManager::is_humongous(word_size) ||
3374          chunk_word_size == if_humongous_sized_chunk,
3375          "Size calculation is wrong, word_size " SIZE_FORMAT
3376          " chunk_word_size " SIZE_FORMAT,
3377          word_size, chunk_word_size);
3378   Log(gc, metaspace, alloc) log;
3379   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3380     log.debug("Metadata humongous allocation:");
3381     log.debug("  word_size " PTR_FORMAT, word_size);
3382     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3383     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3384   }
3385   return chunk_word_size;
3386 }
3387 
3388 void SpaceManager::track_metaspace_memory_usage() {
3389   if (is_init_completed()) {
3390     if (is_class()) {
3391       MemoryService::track_compressed_class_memory_usage();
3392     }
3393     MemoryService::track_metaspace_memory_usage();
3394   }
3395 }
3396 
3397 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3398   assert(vs_list()->current_virtual_space() != NULL,
3399          "Should have been set");
3400   assert(current_chunk() == NULL ||
3401          current_chunk()->allocate(word_size) == NULL,
3402          "Don't need to expand");
3403   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3404 
3405   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3406     size_t words_left = 0;
3407     size_t words_used = 0;
3408     if (current_chunk() != NULL) {
3409       words_left = current_chunk()->free_word_size();
3410       words_used = current_chunk()->used_word_size();
3411     }
3412     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3413                                        word_size, words_used, words_left);
3414   }
3415 
3416   // Get another chunk
3417   size_t chunk_word_size = calc_chunk_size(word_size);
3418   Metachunk* next = get_new_chunk(chunk_word_size);
3419 
3420   MetaWord* mem = NULL;
3421 
3422   // If a chunk was available, add it to the in-use chunk list
3423   // and do an allocation from it.
3424   if (next != NULL) {
3425     // Add to this manager's list of chunks in use.
3426     add_chunk(next, false);
3427     mem = next->allocate(word_size);
3428   }
3429 
3430   // Track metaspace memory usage statistic.
3431   track_metaspace_memory_usage();
3432 
3433   return mem;
3434 }
3435 
3436 void SpaceManager::print_on(outputStream* st) const {
3437 
3438   for (ChunkIndex i = ZeroIndex;
3439        i < NumberOfInUseLists ;
3440        i = next_chunk_index(i) ) {
3441     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3442                  p2i(chunks_in_use(i)),
3443                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3444   }
3445   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3446                " Humongous " SIZE_FORMAT,
3447                sum_waste_in_chunks_in_use(SmallIndex),
3448                sum_waste_in_chunks_in_use(MediumIndex),
3449                sum_waste_in_chunks_in_use(HumongousIndex));
3450   // block free lists
3451   if (block_freelists() != NULL) {
3452     st->print_cr("total in block free lists " SIZE_FORMAT,
3453       block_freelists()->total_size());
3454   }
3455 }
3456 
3457 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3458                            Metaspace::MetaspaceType space_type,
3459                            Mutex* lock) :
3460   _mdtype(mdtype),
3461   _space_type(space_type),
3462   _allocated_blocks_words(0),
3463   _allocated_chunks_words(0),
3464   _allocated_chunks_count(0),
3465   _block_freelists(NULL),
3466   _lock(lock)
3467 {
3468   initialize();
3469 }
3470 
3471 void SpaceManager::inc_size_metrics(size_t words) {
3472   assert_lock_strong(SpaceManager::expand_lock());
3473   // Total words in allocated Metachunks and the count of allocated
3474   // Metachunks for this SpaceManager.
3475   _allocated_chunks_words = _allocated_chunks_words + words;
3476   _allocated_chunks_count++;
3477   // Global total of capacity in allocated Metachunks
3478   MetaspaceUtils::inc_capacity(mdtype(), words);
3479   // Global total of allocated Metablocks.
3480   // used_words_slow() includes the overhead in each
3481   // Metachunk so include it in the used when the
3482   // Metachunk is first added (so only added once per
3483   // Metachunk).
3484   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3485 }
3486 
3487 void SpaceManager::inc_used_metrics(size_t words) {
3488   // Add to the per SpaceManager total
3489   Atomic::add(words, &_allocated_blocks_words);
3490   // Add to the global total
3491   MetaspaceUtils::inc_used(mdtype(), words);
3492 }
3493 
3494 void SpaceManager::dec_total_from_size_metrics() {
3495   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3496   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3497   // Also deduct the overhead per Metachunk
3498   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3499 }
3500 
3501 void SpaceManager::initialize() {
3502   Metadebug::init_allocation_fail_alot_count();
3503   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3504     _chunks_in_use[i] = NULL;
3505   }
3506   _current_chunk = NULL;
3507   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3508 }
3509 
3510 SpaceManager::~SpaceManager() {
3511   // The assert below takes this->_lock (via sum_capacity_in_chunks_in_use()), which can't be done while holding expand_lock().
3512   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3513          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3514          " allocated_chunks_words() " SIZE_FORMAT,
3515          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3516 
3517   MutexLockerEx fcl(SpaceManager::expand_lock(),
3518                     Mutex::_no_safepoint_check_flag);
3519 
3520   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3521          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3522          " allocated_chunks_count() " SIZE_FORMAT,
3523          sum_count_in_chunks_in_use(), allocated_chunks_count());
3524 
3525   chunk_manager()->slow_locked_verify();
3526 
3527   dec_total_from_size_metrics();
3528 
3529   Log(gc, metaspace, freelist) log;
3530   if (log.is_trace()) {
3531     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3532     ResourceMark rm;
3533     LogStream ls(log.trace());
3534     locked_print_chunks_in_use_on(&ls);
3535     if (block_freelists() != NULL) {
3536       block_freelists()->print_on(&ls);
3537     }
3538   }
3539 
3540   // Add all the chunks in use by this space manager
3541   // to the global list of free chunks.
3542 
3543   // Follow each list of chunks-in-use and add them to the
3544   // free lists.  Each list is NULL terminated.
3545 
3546   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3547     Metachunk* chunks = chunks_in_use(i);
3548     chunk_manager()->return_chunk_list(i, chunks);
3549     set_chunks_in_use(i, NULL);
3550   }
3551 
3552   chunk_manager()->slow_locked_verify();
3553 
3554   if (_block_freelists != NULL) {
3555     delete _block_freelists;
3556   }
3557 }
3558 
3559 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3560   assert_lock_strong(_lock);
3561   // Allocations and deallocations are in raw_word_size
3562   size_t raw_word_size = get_allocation_word_size(word_size);
3563   // Lazily create a block_freelist
3564   if (block_freelists() == NULL) {
3565     _block_freelists = new BlockFreelist();
3566   }
3567   block_freelists()->return_block(p, raw_word_size);
3568 }
3569 
3570 // Adds a chunk to the list of chunks in use.
3571 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3572 
3573   assert(new_chunk != NULL, "Should not be NULL");
3574   assert(new_chunk->next() == NULL, "Should not be on a list");
3575 
3576   new_chunk->reset_empty();
3577 
3578   // Find the correct list and set the current
3579   // chunk for that list.
3580   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3581 
3582   if (index != HumongousIndex) {
3583     retire_current_chunk();
3584     set_current_chunk(new_chunk);
3585     new_chunk->set_next(chunks_in_use(index));
3586     set_chunks_in_use(index, new_chunk);
3587   } else {
3588     // For null class loader data and DumpSharedSpaces, the first chunk isn't
3589     // small, so small will be null.  Link this first chunk as the current
3590     // chunk.
3591     if (make_current) {
3592       // Set as the current chunk but otherwise treat as a humongous chunk.
3593       set_current_chunk(new_chunk);
3594     }
3595     // Link at the head.  The _current_chunk only points to a humongous chunk for
3596     // the null class loader metaspace (class and data virtual space managers);
3597     // since humongous chunks are linked at the head, it will not point to the tail
3598     // of the humongous chunks list.
3599     new_chunk->set_next(chunks_in_use(HumongousIndex));
3600     set_chunks_in_use(HumongousIndex, new_chunk);
3601 
3602     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3603   }
3604 
3605   // Add to the running sum of capacity
3606   inc_size_metrics(new_chunk->word_size());
3607 
3608   assert(new_chunk->is_empty(), "Not ready for reuse");
3609   Log(gc, metaspace, freelist) log;
3610   if (log.is_trace()) {
3611     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
3612     ResourceMark rm;
3613     LogStream ls(log.trace());
3614     new_chunk->print_on(&ls);
3615     chunk_manager()->locked_print_free_chunks(&ls);
3616   }
3617 }
3618 
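     // Salvages the unused tail of the current chunk: the remaining free space is handed
     // to the block freelist via deallocate() so later allocations can reuse it, provided
     // it is large enough for the freelist to track.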
3619 void SpaceManager::retire_current_chunk() {
3620   if (current_chunk() != NULL) {
3621     size_t remaining_words = current_chunk()->free_word_size();
3622     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3623       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3624       deallocate(ptr, remaining_words);
3625       inc_used_metrics(remaining_words);
3626     }
3627   }
3628 }
3629 
3630 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3631   // Get a chunk from the chunk freelist
3632   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3633 
3634   if (next == NULL) {
3635     next = vs_list()->get_new_chunk(chunk_word_size,
3636                                     medium_chunk_bunch());
3637   }
3638 
3639   Log(gc, metaspace, alloc) log;
3640   if (log.is_debug() && next != NULL &&
3641       SpaceManager::is_humongous(next->word_size())) {
3642     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3643   }
3644 
3645   return next;
3646 }
3647 
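     // Allocates word_size words of metadata: first tries the freelist of previously
     // deallocated blocks (once it has grown beyond allocation_from_dictionary_limit),
     // then falls back to carving the allocation out of a chunk via allocate_work().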
3648 MetaWord* SpaceManager::allocate(size_t word_size) {
3649   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3650   size_t raw_word_size = get_allocation_word_size(word_size);
3651   BlockFreelist* fl =  block_freelists();
3652   MetaWord* p = NULL;
3653   // Allocation from the dictionary is expensive in the sense that
3654   // the dictionary has to be searched for a size.  Don't allocate
3655   // from the dictionary until it starts to get fat.  Is this
3656   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3657   // for allocations.  Do some profiling.  JJJ
3658   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3659     p = fl->get_block(raw_word_size);
3660   }
3661   if (p == NULL) {
3662     p = allocate_work(raw_word_size);
3663   }
3664 
3665   return p;
3666 }
3667 
3668 // Returns the address of the space allocated for "word_size".
3669 // This method does not know about blocks (Metablocks).
3670 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3671   assert_lock_strong(_lock);
3672 #ifdef ASSERT
3673   if (Metadebug::test_metadata_failure()) {
3674     return NULL;
3675   }
3676 #endif
3677   // Is there space in the current chunk?
3678   MetaWord* result = NULL;
3679 
3680   if (current_chunk() != NULL) {
3681     result = current_chunk()->allocate(word_size);
3682   }
3683 
3684   if (result == NULL) {
3685     result = grow_and_allocate(word_size);
3686   }
3687 
3688   if (result != NULL) {
3689     inc_used_metrics(word_size);
3690     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3691            "Head of the list is being allocated");
3692   }
3693 
3694   return result;
3695 }
3696 
3697 void SpaceManager::verify() {
3698   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3699     Metachunk* curr = chunks_in_use(i);
3700     while (curr != NULL) {
3701       DEBUG_ONLY(do_verify_chunk(curr);)
3702       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3703       curr = curr->next();
3704     }
3705   }
3706 }
3707 
3708 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3709   assert(is_humongous(chunk->word_size()) ||
3710          chunk->word_size() == medium_chunk_size() ||
3711          chunk->word_size() == small_chunk_size() ||
3712          chunk->word_size() == specialized_chunk_size(),
3713          "Chunk size is wrong");
3714   return;
3715 }
3716 
3717 #ifdef ASSERT
3718 void SpaceManager::verify_allocated_blocks_words() {
3719   // Verification is only guaranteed at a safepoint.
3720   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3721     "Verification can fail if the application is running");
3722   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3723          "allocation total is not consistent " SIZE_FORMAT
3724          " vs " SIZE_FORMAT,
3725          allocated_blocks_words(), sum_used_in_chunks_in_use());
3726 }
3727 
3728 #endif
3729 
3730 void SpaceManager::dump(outputStream* const out) const {
3731   size_t curr_total = 0;
3732   size_t waste = 0;
3733   uint i = 0;
3734   size_t used = 0;
3735   size_t capacity = 0;
3736 
3737   // Add up statistics for all chunks in this SpaceManager.
3738   for (ChunkIndex index = ZeroIndex;
3739        index < NumberOfInUseLists;
3740        index = next_chunk_index(index)) {
3741     for (Metachunk* curr = chunks_in_use(index);
3742          curr != NULL;
3743          curr = curr->next()) {
3744       out->print("%d) ", i++);
3745       curr->print_on(out);
3746       curr_total += curr->word_size();
3747       used += curr->used_word_size();
3748       capacity += curr->word_size();
3749       waste += curr->free_word_size() + curr->overhead();
3750     }
3751   }
3752 
3753   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3754     if (block_freelists() != NULL) block_freelists()->print_on(out);
3755   }
3756 
3757   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3758   // Free space isn't wasted.
3759   waste -= free;
3760 
3761   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3762                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3763                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3764 }
3765 
3766 // MetaspaceUtils
3767 
3768 
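     // Running totals, indexed by Metaspace::MetadataType (one slot per metadata type).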
3769 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3770 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3771 
3772 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3773   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3774   return list == NULL ? 0 : list->free_bytes();
3775 }
3776 
3777 size_t MetaspaceUtils::free_bytes() {
3778   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3779 }
3780 
3781 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3782   assert_lock_strong(SpaceManager::expand_lock());
3783   assert(words <= capacity_words(mdtype),
3784          "About to decrement below 0: words " SIZE_FORMAT
3785          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3786          words, mdtype, capacity_words(mdtype));
3787   _capacity_words[mdtype] -= words;
3788 }
3789 
3790 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3791   assert_lock_strong(SpaceManager::expand_lock());
3792   // Needs to be atomic
3793   _capacity_words[mdtype] += words;
3794 }
3795 
3796 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3797   assert(words <= used_words(mdtype),
3798          "About to decrement below 0: words " SIZE_FORMAT
3799          " is greater than _used_words[%u] " SIZE_FORMAT,
3800          words, mdtype, used_words(mdtype));
3801   // For CMS deallocation of the Metaspaces occurs during the
3802   // sweep which is a concurrent phase.  Protection by the expand_lock()
3803   // is not enough since allocation is on a per Metaspace basis
3804   // and protected by the Metaspace lock.
3805   Atomic::sub(words, &_used_words[mdtype]);
3806 }
3807 
3808 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3809   // _used_words tracks allocations for
3810   // each piece of metadata.  Those allocations are
3811   // generally done concurrently by different application
3812   // threads so must be done atomically.
3813   Atomic::add(words, &_used_words[mdtype]);
3814 }
3815 
3816 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3817   size_t used = 0;
3818   ClassLoaderDataGraphMetaspaceIterator iter;
3819   while (iter.repeat()) {
3820     ClassLoaderMetaspace* msp = iter.get_next();
3821     // Sum allocated_blocks_words for each metaspace
3822     if (msp != NULL) {
3823       used += msp->used_words_slow(mdtype);
3824     }
3825   }
3826   return used * BytesPerWord;
3827 }
3828 
3829 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3830   size_t free = 0;
3831   ClassLoaderDataGraphMetaspaceIterator iter;
3832   while (iter.repeat()) {
3833     ClassLoaderMetaspace* msp = iter.get_next();
3834     if (msp != NULL) {
3835       free += msp->free_words_slow(mdtype);
3836     }
3837   }
3838   return free * BytesPerWord;
3839 }
3840 
3841 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3842   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3843     return 0;
3844   }
3845   // Don't count the space in the freelists.  That space will be
3846   // added to the capacity calculation as needed.
3847   size_t capacity = 0;
3848   ClassLoaderDataGraphMetaspaceIterator iter;
3849   while (iter.repeat()) {
3850     ClassLoaderMetaspace* msp = iter.get_next();
3851     if (msp != NULL) {
3852       capacity += msp->capacity_words_slow(mdtype);
3853     }
3854   }
3855   return capacity * BytesPerWord;
3856 }
3857 
3858 size_t MetaspaceUtils::capacity_bytes_slow() {
3859 #ifdef PRODUCT
3860   // Use capacity_bytes() in PRODUCT instead of this function.
3861   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3862 #endif
3863   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3864   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3865   assert(capacity_bytes() == class_capacity + non_class_capacity,
3866          "bad accounting: capacity_bytes() " SIZE_FORMAT
3867          " class_capacity + non_class_capacity " SIZE_FORMAT
3868          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3869          capacity_bytes(), class_capacity + non_class_capacity,
3870          class_capacity, non_class_capacity);
3871 
3872   return class_capacity + non_class_capacity;
3873 }
3874 
3875 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3876   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3877   return list == NULL ? 0 : list->reserved_bytes();
3878 }
3879 
3880 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3881   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3882   return list == NULL ? 0 : list->committed_bytes();
3883 }
3884 
3885 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3886 
3887 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3888   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3889   if (chunk_manager == NULL) {
3890     return 0;
3891   }
3892   chunk_manager->slow_verify();
3893   return chunk_manager->free_chunks_total_words();
3894 }
3895 
3896 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3897   return free_chunks_total_words(mdtype) * BytesPerWord;
3898 }
3899 
3900 size_t MetaspaceUtils::free_chunks_total_words() {
3901   return free_chunks_total_words(Metaspace::ClassType) +
3902          free_chunks_total_words(Metaspace::NonClassType);
3903 }
3904 
3905 size_t MetaspaceUtils::free_chunks_total_bytes() {
3906   return free_chunks_total_words() * BytesPerWord;
3907 }
3908 
3909 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3910   return Metaspace::get_chunk_manager(mdtype) != NULL;
3911 }
3912 
3913 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3914   if (!has_chunk_free_list(mdtype)) {
3915     return MetaspaceChunkFreeListSummary();
3916   }
3917 
3918   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3919   return cm->chunk_free_list_summary();
3920 }
3921 
3922 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3923   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3924                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3925 }
3926 
3927 void MetaspaceUtils::print_on(outputStream* out) {
3928   Metaspace::MetadataType nct = Metaspace::NonClassType;
3929 
3930   out->print_cr(" Metaspace       "
3931                 "used "      SIZE_FORMAT "K, "
3932                 "capacity "  SIZE_FORMAT "K, "
3933                 "committed " SIZE_FORMAT "K, "
3934                 "reserved "  SIZE_FORMAT "K",
3935                 used_bytes()/K,
3936                 capacity_bytes()/K,
3937                 committed_bytes()/K,
3938                 reserved_bytes()/K);
3939 
3940   if (Metaspace::using_class_space()) {
3941     Metaspace::MetadataType ct = Metaspace::ClassType;
3942     out->print_cr("  class space    "
3943                   "used "      SIZE_FORMAT "K, "
3944                   "capacity "  SIZE_FORMAT "K, "
3945                   "committed " SIZE_FORMAT "K, "
3946                   "reserved "  SIZE_FORMAT "K",
3947                   used_bytes(ct)/K,
3948                   capacity_bytes(ct)/K,
3949                   committed_bytes(ct)/K,
3950                   reserved_bytes(ct)/K);
3951   }
3952 }
3953 
3954 // Print information for class space and data space separately.
3955 // This is almost the same as above.
3956 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3957   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3958   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3959   size_t used_bytes = used_bytes_slow(mdtype);
3960   size_t free_bytes = free_bytes_slow(mdtype);
3961   size_t used_and_free = used_bytes + free_bytes +
3962                            free_chunks_capacity_bytes;
3963   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3964              "K + unused in chunks " SIZE_FORMAT "K  + "
3965              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3966              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3967              used_bytes / K,
3968              free_bytes / K,
3969              free_chunks_capacity_bytes / K,
3970              used_and_free / K,
3971              capacity_bytes / K);
3972   // Accounting can only be correct if we got the values during a safepoint
3973   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3974 }
3975 
3976 // Print total fragmentation for class metaspaces
3977 void MetaspaceUtils::print_class_waste(outputStream* out) {
3978   assert(Metaspace::using_class_space(), "class metaspace not used");
3979   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3980   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3981   ClassLoaderDataGraphMetaspaceIterator iter;
3982   while (iter.repeat()) {
3983     ClassLoaderMetaspace* msp = iter.get_next();
3984     if (msp != NULL) {
3985       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3986       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
3987       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
3988       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
3989       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
3990       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
3991       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
3992     }
3993   }
3994   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
3995                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
3996                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
3997                 "large count " SIZE_FORMAT,
3998                 cls_specialized_count, cls_specialized_waste,
3999                 cls_small_count, cls_small_waste,
4000                 cls_medium_count, cls_medium_waste, cls_humongous_count);
4001 }
4002 
4003 // Print total fragmentation for data and class metaspaces separately
4004 void MetaspaceUtils::print_waste(outputStream* out) {
4005   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
4006   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
4007 
4008   ClassLoaderDataGraphMetaspaceIterator iter;
4009   while (iter.repeat()) {
4010     ClassLoaderMetaspace* msp = iter.get_next();
4011     if (msp != NULL) {
4012       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4013       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4014       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4015       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
4016       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4017       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4018       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4019     }
4020   }
4021   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4022   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4023                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4024                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4025                         "large count " SIZE_FORMAT,
4026              specialized_count, specialized_waste, small_count,
4027              small_waste, medium_count, medium_waste, humongous_count);
4028   if (Metaspace::using_class_space()) {
4029     print_class_waste(out);
4030   }
4031 }
4032 
4033 class MetadataStats {
4034 private:
4035   size_t _capacity;
4036   size_t _used;
4037   size_t _free;
4038   size_t _waste;
4039 
4040 public:
4041   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4042   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4043   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4044 
4045   void add(const MetadataStats& stats) {
4046     _capacity += stats.capacity();
4047     _used += stats.used();
4048     _free += stats.free();
4049     _waste += stats.waste();
4050   }
4051 
4052   size_t capacity() const { return _capacity; }
4053   size_t used() const     { return _used; }
4054   size_t free() const     { return _free; }
4055   size_t waste() const    { return _waste; }
4056 
4057   void print_on(outputStream* out, size_t scale) const;
4058 };
4059 
4060 
4061 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4062   const char* unit = scale_unit(scale);
4063   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4064     (float)capacity() / scale, unit,
4065     (float)used() / scale, unit,
4066     (float)free() / scale, unit,
4067     (float)waste() / scale, unit);
4068 }
4069 
4070 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4071 private:
4072   outputStream*  _out;
4073   size_t         _scale;
4074 
4075   size_t         _total_count;
4076   MetadataStats  _total_metadata;
4077   MetadataStats  _total_class;
4078 
4079   size_t         _total_anon_count;
4080   MetadataStats  _total_anon_metadata;
4081   MetadataStats  _total_anon_class;
4082 
4083 public:
4084   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4085   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4086 
4087   ~PrintCLDMetaspaceInfoClosure() {
4088     print_summary();
4089   }
4090 
4091   void do_cld(ClassLoaderData* cld) {
4092     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4093 
4094     if (cld->is_unloading()) return;
4095     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4096     if (msp == NULL) {
4097       return;
4098     }
4099 
4100     bool anonymous = false;
4101     if (cld->is_anonymous()) {
4102       _out->print_cr("ClassLoader: for anonymous class");
4103       anonymous = true;
4104     } else {
4105       ResourceMark rm;
4106       _out->print_cr("ClassLoader: %s", cld->loader_name());
4107     }
4108 
4109     print_metaspace(msp, anonymous);
4110     _out->cr();
4111   }
4112 
4113 private:
4114   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4115   void print_summary() const;
4116 };
4117 
4118 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4119   assert(msp != NULL, "Sanity");
4120   SpaceManager* vsm = msp->vsm();
4122 
4123   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4124   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4125   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4126   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4127 
4128   _total_count ++;
4129   MetadataStats metadata_stats(capacity, used, free, waste);
4130   _total_metadata.add(metadata_stats);
4131 
4132   if (anonymous) {
4133     _total_anon_count ++;
4134     _total_anon_metadata.add(metadata_stats);
4135   }
4136 
4137   _out->print("  Metadata   ");
4138   metadata_stats.print_on(_out, _scale);
4139 
4140   if (Metaspace::using_class_space()) {
4141     vsm = msp->class_vsm();
4142 
4143     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4144     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4145     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4146     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4147 
4148     MetadataStats class_stats(capacity, used, free, waste);
4149     _total_class.add(class_stats);
4150 
4151     if (anonymous) {
4152       _total_anon_class.add(class_stats);
4153     }
4154 
4155     _out->print("  Class data ");
4156     class_stats.print_on(_out, _scale);
4157   }
4158 }
4159 
4160 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4162   _out->cr();
4163   _out->print_cr("Summary:");
4164 
4165   MetadataStats total;
4166   total.add(_total_metadata);
4167   total.add(_total_class);
4168 
4169   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4170   total.print_on(_out, _scale);
4171 
4172   _out->print("                    Metadata ");
4173   _total_metadata.print_on(_out, _scale);
4174 
4175   if (Metaspace::using_class_space()) {
4176     _out->print("                  Class data ");
4177     _total_class.print_on(_out, _scale);
4178   }
4179   _out->cr();
4180 
4181   MetadataStats total_anon;
4182   total_anon.add(_total_anon_metadata);
4183   total_anon.add(_total_anon_class);
4184 
4185   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4186   total_anon.print_on(_out, _scale);
4187 
4188   _out->print("                    Metadata ");
4189   _total_anon_metadata.print_on(_out, _scale);
4190 
4191   if (Metaspace::using_class_space()) {
4192     _out->print("                  Class data ");
4193     _total_anon_class.print_on(_out, _scale);
4194   }
4195 }
4196 
4197 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4198   const char* unit = scale_unit(scale);
4199   out->print_cr("Metaspaces:");
4200   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4201     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4202     committed_bytes(Metaspace::NonClassType) / scale, unit);
4203   if (Metaspace::using_class_space()) {
4204     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4205     reserved_bytes(Metaspace::ClassType) / scale, unit,
4206     committed_bytes(Metaspace::ClassType) / scale, unit);
4207   }
4208 
4209   out->cr();
4210   ChunkManager::print_all_chunkmanagers(out, scale);
4211 
4212   out->cr();
4213   out->print_cr("Per-classloader metadata:");
4214   out->cr();
4215 
4216   PrintCLDMetaspaceInfoClosure cl(out, scale);
4217   ClassLoaderDataGraph::cld_do(&cl);
4218 }
4219 
4220 
// Dump global metaspace information (invoked at the end of a ClassLoaderDataGraph dump).
4222 void MetaspaceUtils::dump(outputStream* out) {
4223   out->print_cr("All Metaspace:");
4224   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4225   out->print("class space: "); print_on(out, Metaspace::ClassType);
4226   print_waste(out);
4227 }
4228 
4229 // Prints an ASCII representation of the given space.
4230 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4231   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  const bool for_class = (mdtype == Metaspace::ClassType);
4233   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4234   if (vsl != NULL) {
4235     if (for_class) {
4236       if (!Metaspace::using_class_space()) {
4237         out->print_cr("No Class Space.");
4238         return;
4239       }
4240       out->print_raw("---- Metaspace Map (Class Space) ----");
4241     } else {
4242       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4243     }
4244     // Print legend:
4245     out->cr();
4246     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4247     out->cr();
4249     vsl->print_map(out);
4250     out->cr();
4251   }
4252 }
4253 
4254 void MetaspaceUtils::verify_free_chunks() {
4255   Metaspace::chunk_manager_metadata()->verify();
4256   if (Metaspace::using_class_space()) {
4257     Metaspace::chunk_manager_class()->verify();
4258   }
4259 }
4260 
4261 void MetaspaceUtils::verify_capacity() {
4262 #ifdef ASSERT
4263   size_t running_sum_capacity_bytes = capacity_bytes();
  // Verify the running sum of capacity bytes against the slow recount.
  size_t capacity_in_use_bytes = capacity_bytes_slow();
  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
         "capacity_bytes() " SIZE_FORMAT
         " capacity_bytes_slow() " SIZE_FORMAT,
         running_sum_capacity_bytes, capacity_in_use_bytes);
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
4273     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4274     assert(capacity_bytes(i) == capacity_in_use_bytes,
4275            "capacity_bytes(%u) " SIZE_FORMAT
           " capacity_bytes_slow(%u) " SIZE_FORMAT,
4277            i, capacity_bytes(i), i, capacity_in_use_bytes);
4278   }
4279 #endif
4280 }
4281 
4282 void MetaspaceUtils::verify_used() {
4283 #ifdef ASSERT
  size_t running_sum_used_bytes = used_bytes();
  // Verify the running sum of used bytes against the slow recount.
  size_t used_in_use_bytes = used_bytes_slow();
  assert(running_sum_used_bytes == used_in_use_bytes,
         "used_bytes() " SIZE_FORMAT
         " used_bytes_slow() " SIZE_FORMAT,
         running_sum_used_bytes, used_in_use_bytes);
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
4294     size_t used_in_use_bytes = used_bytes_slow(i);
4295     assert(used_bytes(i) == used_in_use_bytes,
4296            "used_bytes(%u) " SIZE_FORMAT
           " used_bytes_slow(%u) " SIZE_FORMAT,
4298            i, used_bytes(i), i, used_in_use_bytes);
4299   }
4300 #endif
4301 }
4302 
4303 void MetaspaceUtils::verify_metrics() {
4304   verify_capacity();
4305   verify_used();
4306 }
4307 
4308 
4309 // Metaspace methods
4310 
4311 size_t Metaspace::_first_chunk_word_size = 0;
4312 size_t Metaspace::_first_class_chunk_word_size = 0;
4313 
4314 size_t Metaspace::_commit_alignment = 0;
4315 size_t Metaspace::_reserve_alignment = 0;
4316 
4317 VirtualSpaceList* Metaspace::_space_list = NULL;
4318 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4319 
4320 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4321 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4322 
4323 #define VIRTUALSPACEMULTIPLIER 2
4324 
4325 #ifdef _LP64
4326 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4327 
4328 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4329   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4330   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4331   // narrow_klass_base is the lower of the metaspace base and the cds base
4332   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4333   // between the lower base and higher address.
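  // Illustrative walk-through (hypothetical addresses, not values computed here):
  // with a 1G compressed class space reserved at 4G and CDS disabled,
  // higher_address is 5G. That is below the 32G unscaled-encoding limit, so
  // lower_base becomes 0; but since 5G still exceeds UnscaledClassSpaceMax (4G),
  // a shift of LogKlassAlignmentInBytes is used (zero-based, shifted encoding).
  // Only if higher_address were also at or below 4G would both base and shift
  // end up zero.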
4334   address lower_base;
4335   address higher_address;
4336 #if INCLUDE_CDS
4337   if (UseSharedSpaces) {
4338     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4339                           (address)(metaspace_base + compressed_class_space_size()));
4340     lower_base = MIN2(metaspace_base, cds_base);
4341   } else
4342 #endif
4343   {
4344     higher_address = metaspace_base + compressed_class_space_size();
4345     lower_base = metaspace_base;
4346 
4347     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4348     // If compressed class space fits in lower 32G, we don't need a base.
4349     if (higher_address <= (address)klass_encoding_max) {
4350       lower_base = 0; // Effectively lower base is zero.
4351     }
4352   }
4353 
4354   Universe::set_narrow_klass_base(lower_base);
4355 
  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how the dump time narrow_klass_shift is set. Although CDS can also
  // work in zero-shift mode, it uses LogKlassAlignmentInBytes for the
  // klass shift to stay consistent with AOT, so archived java heap objects
  // can be used at the same time as AOT code.
4362   if (!UseSharedSpaces
4363       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4364     Universe::set_narrow_klass_shift(0);
4365   } else {
4366     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4367   }
4368   AOTLoader::set_narrow_klass_shift();
4369 }
4370 
4371 #if INCLUDE_CDS
4372 // Return TRUE if the specified metaspace_base and cds_base are close enough
4373 // to work with compressed klass pointers.
4374 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4375   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4376   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4377   address lower_base = MIN2((address)metaspace_base, cds_base);
4378   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4379                                 (address)(metaspace_base + compressed_class_space_size()));
4380   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4381 }
4382 #endif
4383 
4384 // Try to allocate the metaspace at the requested addr.
4385 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4386   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4387   assert(using_class_space(), "called improperly");
4388   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4389   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4390          "Metaspace size is too big");
4391   assert_is_aligned(requested_addr, _reserve_alignment);
4392   assert_is_aligned(cds_base, _reserve_alignment);
4393   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4394 
4395   // Don't use large pages for the class space.
4396   bool large_pages = false;
4397 
4398 #if !(defined(AARCH64) || defined(AIX))
4399   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4400                                              _reserve_alignment,
4401                                              large_pages,
4402                                              requested_addr);
#else // AARCH64 || AIX
4404   ReservedSpace metaspace_rs;
4405 
4406   // Our compressed klass pointers may fit nicely into the lower 32
4407   // bits.
4408   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4409     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4410                                  _reserve_alignment,
4411                                  large_pages,
4412                                  requested_addr);
4413   }
4414 
4415   if (! metaspace_rs.is_reserved()) {
4416     // Aarch64: Try to align metaspace so that we can decode a compressed
4417     // klass with a single MOVK instruction.  We can do this iff the
4418     // compressed class base is a multiple of 4G.
4419     // Aix: Search for a place where we can find memory. If we need to load
4420     // the base, 4G alignment is helpful, too.
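    // Rough illustration (hypothetical; the actual decode is emitted elsewhere):
    // a base such as 0x8_0000_0000 has zero low 32 bits, so the runtime can merge
    // it into a register already holding the (shifted) narrow klass with a single
    // "movk xN, #0x8, lsl #32" instead of a separate load-and-add sequence.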
4421     size_t increment = AARCH64_ONLY(4*)G;
4422     for (char *a = align_up(requested_addr, increment);
4423          a < (char*)(1024*G);
4424          a += increment) {
4425       if (a == (char *)(32*G)) {
4426         // Go faster from here on. Zero-based is no longer possible.
4427         increment = 4*G;
4428       }
4429 
4430 #if INCLUDE_CDS
4431       if (UseSharedSpaces
4432           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4433         // We failed to find an aligned base that will reach.  Fall
4434         // back to using our requested addr.
4435         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4436                                      _reserve_alignment,
4437                                      large_pages,
4438                                      requested_addr);
4439         break;
4440       }
4441 #endif
4442 
4443       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4444                                    _reserve_alignment,
4445                                    large_pages,
4446                                    a);
4447       if (metaspace_rs.is_reserved())
4448         break;
4449     }
4450   }
4451 
#endif // AARCH64 || AIX
4453 
4454   if (!metaspace_rs.is_reserved()) {
4455 #if INCLUDE_CDS
4456     if (UseSharedSpaces) {
4457       size_t increment = align_up(1*G, _reserve_alignment);
4458 
4459       // Keep trying to allocate the metaspace, increasing the requested_addr
4460       // by 1GB each time, until we reach an address that will no longer allow
4461       // use of CDS with compressed klass pointers.
4462       char *addr = requested_addr;
4463       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4464              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4465         addr = addr + increment;
4466         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4467                                      _reserve_alignment, large_pages, addr);
4468       }
4469     }
4470 #endif
    // If no allocation has succeeded so far, try to allocate the space anywhere.
    // If that also fails, we are irrecoverably out of memory. At this point we
    // cannot fall back to allocating the metaspace as if UseCompressedClassPointers
    // were off, because too much initialization that depends on
    // UseCompressedClassPointers has already happened. So, UseCompressedClassPointers
    // cannot be turned off at this point.
4476     if (!metaspace_rs.is_reserved()) {
4477       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4478                                    _reserve_alignment, large_pages);
4479       if (!metaspace_rs.is_reserved()) {
4480         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4481                                               compressed_class_space_size()));
4482       }
4483     }
4484   }
4485 
4486   // If we got here then the metaspace got allocated.
4487   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4488 
4489 #if INCLUDE_CDS
4490   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4491   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4492     FileMapInfo::stop_sharing_and_unmap(
4493         "Could not allocate metaspace at a compatible address");
4494   }
4495 #endif
4496   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4497                                   UseSharedSpaces ? (address)cds_base : 0);
4498 
4499   initialize_class_space(metaspace_rs);
4500 
4501   LogTarget(Trace, gc, metaspace) lt;
4502   if (lt.is_enabled()) {
4503     ResourceMark rm;
4504     LogStream ls(lt);
4505     print_compressed_class_space(&ls, requested_addr);
4506   }
4507 }
4508 
4509 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4510   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4511                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4512   if (_class_space_list != NULL) {
4513     address base = (address)_class_space_list->current_virtual_space()->bottom();
4514     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4515                  compressed_class_space_size(), p2i(base));
4516     if (requested_addr != 0) {
4517       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4518     }
4519     st->cr();
4520   }
4521 }
4522 
4523 // For UseCompressedClassPointers the class space is reserved above the top of
4524 // the Java heap.  The argument passed in is at the base of the compressed space.
4525 void Metaspace::initialize_class_space(ReservedSpace rs) {
4526   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4527   assert(rs.size() >= CompressedClassSpaceSize,
4528          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4529   assert(using_class_space(), "Must be using class space");
4530   _class_space_list = new VirtualSpaceList(rs);
4531   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4532 
4533   if (!_class_space_list->initialization_succeeded()) {
4534     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4535   }
4536 }
4537 
4538 #endif
4539 
4540 void Metaspace::ergo_initialize() {
4541   if (DumpSharedSpaces) {
4542     // Using large pages when dumping the shared archive is currently not implemented.
4543     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4544   }
4545 
4546   size_t page_size = os::vm_page_size();
4547   if (UseLargePages && UseLargePagesInMetaspace) {
4548     page_size = os::large_page_size();
4549   }
4550 
4551   _commit_alignment  = page_size;
4552   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4553 
4554   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
4555   // override if MaxMetaspaceSize was set on the command line or not.
4556   // This information is needed later to conform to the specification of the
4557   // java.lang.management.MemoryUsage API.
4558   //
4559   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4560   // globals.hpp to the aligned value, but this is not possible, since the
4561   // alignment depends on other flags being parsed.
4562   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
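  // Illustrative example (values are hypothetical): with a 64K reserve
  // alignment, an unaligned -XX:MaxMetaspaceSize=100000000 would be rounded
  // down here to 99942400, the next lower multiple of 64K.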
4563 
4564   if (MetaspaceSize > MaxMetaspaceSize) {
4565     MetaspaceSize = MaxMetaspaceSize;
4566   }
4567 
4568   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4569 
4570   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4571 
4572   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4573   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4574 
4575   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4576 
4577   // Initial virtual space size will be calculated at global_initialize()
4578   size_t min_metaspace_sz =
4579       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4580   if (UseCompressedClassPointers) {
4581     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4582       if (min_metaspace_sz >= MaxMetaspaceSize) {
4583         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4584       } else {
4585         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4586                       MaxMetaspaceSize - min_metaspace_sz);
4587       }
4588     }
4589   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4590     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4591                   min_metaspace_sz);
4592   }
4593 
4594   set_compressed_class_space_size(CompressedClassSpaceSize);
4595 }
4596 
4597 void Metaspace::global_initialize() {
4598   MetaspaceGC::initialize();
4599 
4600 #if INCLUDE_CDS
4601   if (DumpSharedSpaces) {
4602     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4603   } else if (UseSharedSpaces) {
4604     // If any of the archived space fails to map, UseSharedSpaces
4605     // is reset to false. Fall through to the
4606     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4607     // metaspace.
4608     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4609   }
4610 
4611   if (!DumpSharedSpaces && !UseSharedSpaces)
4612 #endif // INCLUDE_CDS
4613   {
4614 #ifdef _LP64
4615     if (using_class_space()) {
4616       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4617       allocate_metaspace_compressed_klass_ptrs(base, 0);
4618     }
4619 #endif // _LP64
4620   }
4621 
4622   // Initialize these before initializing the VirtualSpaceList
4623   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4624   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list. The next chunk will be small and progress
  // from there. This size was calculated from a -version run.
4628   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4629                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4630   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
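  // For illustration: on a 64-bit VM with the default 1G CompressedClassSpaceSize,
  // MediumChunk * 6 = 48K words (384K) is the smaller of the two operands, so the
  // first class chunk ends up at 48K words, well above ClassMediumChunk.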
4631   // Arbitrarily set the initial virtual space to a multiple
4632   // of the boot class loader size.
4633   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4634   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4635 
4636   // Initialize the list of virtual spaces.
4637   _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4639 
4640   if (!_space_list->initialization_succeeded()) {
4641     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4642   }
4643 
4644   _tracer = new MetaspaceTracer();
4645 }
4646 
4647 void Metaspace::post_initialize() {
4648   MetaspaceGC::post_initialize();
4649 }
4650 
4651 void Metaspace::verify_global_initialization() {
4652   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4653   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4654 
4655   if (using_class_space()) {
4656     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4657     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4658   }
4659 }
4660 
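// Rounds a word size up so that the corresponding byte size satisfies
// ReservedSpace's allocation alignment. Illustrative example (assuming an
// 8-byte word and a 64K allocation granularity): a request of 100 words
// (800 bytes) would round up to 8192 words (64K).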
4661 size_t Metaspace::align_word_size_up(size_t word_size) {
4662   size_t byte_size = word_size * wordSize;
4663   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4664 }
4665 
4666 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4667                               MetaspaceObj::Type type, TRAPS) {
4668   assert(!_frozen, "sanity");
4669   if (HAS_PENDING_EXCEPTION) {
4670     assert(false, "Should not allocate with exception pending");
4671     return NULL;  // caller does a CHECK_NULL too
4672   }
4673 
4674   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4675         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4676 
4677   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4678 
4679   // Try to allocate metadata.
4680   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4681 
4682   if (result == NULL) {
4683     if (DumpSharedSpaces && THREAD->is_VM_thread()) {
4684       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4685           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4686       vm_exit(1);
4687     }
4688 
4689     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4690 
4691     // Allocation failed.
4692     if (is_init_completed()) {
4693       // Only start a GC if the bootstrapping has completed.
4694 
4695       // Try to clean out some memory and retry.
4696       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4697     }
4698   }
4699 
4700   if (result == NULL) {
4701     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4702   }
4703 
4704   // Zero initialize.
4705   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4706 
4707   return result;
4708 }
4709 
4710 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4711   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4712 
4713   // If result is still null, we are out of memory.
4714   Log(gc, metaspace, freelist) log;
4715   if (log.is_info()) {
4716     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4717              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4718     ResourceMark rm;
4719     if (log.is_debug()) {
4720       if (loader_data->metaspace_or_null() != NULL) {
4721         LogStream ls(log.debug());
4722         loader_data->print_value_on(&ls);
4723       }
4724     }
4725     LogStream ls(log.info());
4726     MetaspaceUtils::dump(&ls);
4727     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4728     ChunkManager::print_all_chunkmanagers(&ls);
4729   }
4730 
4731   bool out_of_compressed_class_space = false;
4732   if (is_class_space_allocation(mdtype)) {
4733     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4734     out_of_compressed_class_space =
4735       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4736       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4737       CompressedClassSpaceSize;
4738   }
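  // For example (illustrative numbers): with -XX:CompressedClassSpaceSize=1g,
  // if roughly 1023M of class space is already committed and the failed request
  // would need another 2M chunk, the sum exceeds 1G and the failure is reported
  // as "Compressed class space" rather than general Metaspace exhaustion.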
4739 
4740   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4741   const char* space_string = out_of_compressed_class_space ?
4742     "Compressed class space" : "Metaspace";
4743 
4744   report_java_out_of_memory(space_string);
4745 
4746   if (JvmtiExport::should_post_resource_exhausted()) {
4747     JvmtiExport::post_resource_exhausted(
4748         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4749         space_string);
4750   }
4751 
4752   if (!is_init_completed()) {
4753     vm_exit_during_initialization("OutOfMemoryError", space_string);
4754   }
4755 
4756   if (out_of_compressed_class_space) {
4757     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4758   } else {
4759     THROW_OOP(Universe::out_of_memory_error_metaspace());
4760   }
4761 }
4762 
4763 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4764   switch (mdtype) {
4765     case Metaspace::ClassType: return "Class";
4766     case Metaspace::NonClassType: return "Metadata";
4767     default:
4768       assert(false, "Got bad mdtype: %d", (int) mdtype);
4769       return NULL;
4770   }
4771 }
4772 
4773 void Metaspace::purge(MetadataType mdtype) {
4774   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4775 }
4776 
4777 void Metaspace::purge() {
4778   MutexLockerEx cl(SpaceManager::expand_lock(),
4779                    Mutex::_no_safepoint_check_flag);
4780   purge(NonClassType);
4781   if (using_class_space()) {
4782     purge(ClassType);
4783   }
4784 }
4785 
4786 bool Metaspace::contains(const void* ptr) {
4787   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4788     return true;
4789   }
4790   return contains_non_shared(ptr);
4791 }
4792 
4793 bool Metaspace::contains_non_shared(const void* ptr) {
4794   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4795      return true;
4796   }
4797 
4798   return get_space_list(NonClassType)->contains(ptr);
4799 }
4800 
4801 // ClassLoaderMetaspace
4802 
4803 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4804   initialize(lock, type);
4805 }
4806 
4807 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4808   delete _vsm;
4809   if (Metaspace::using_class_space()) {
4810     delete _class_vsm;
4811   }
4812 }
4813 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4814   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4815   if (chunk != NULL) {
4816     // Add to this manager's list of chunks in use and current_chunk().
4817     get_space_manager(mdtype)->add_chunk(chunk, true);
4818   }
4819 }
4820 
4821 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4822   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4823 
4824   // Get a chunk from the chunk freelist
4825   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4826 
4827   if (chunk == NULL) {
4828     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4829                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4830   }
4831 
4832   return chunk;
4833 }
4834 
4835 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4836   Metaspace::verify_global_initialization();
4837 
4838   // Allocate SpaceManager for metadata objects.
4839   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4840 
4841   if (Metaspace::using_class_space()) {
4842     // Allocate SpaceManager for classes.
4843     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4844   }
4845 
4846   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4847 
4848   // Allocate chunk for metadata objects
4849   initialize_first_chunk(type, Metaspace::NonClassType);
4850 
4851   // Allocate chunk for class metadata objects
4852   if (Metaspace::using_class_space()) {
4853     initialize_first_chunk(type, Metaspace::ClassType);
4854   }
4855 }
4856 
4857 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4858   Metaspace::assert_not_frozen();
4859   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4860   if (Metaspace::is_class_space_allocation(mdtype)) {
4861     return  class_vsm()->allocate(word_size);
4862   } else {
4863     return  vsm()->allocate(word_size);
4864   }
4865 }
4866 
4867 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4868   Metaspace::assert_not_frozen();
4869   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4870   assert(delta_bytes > 0, "Must be");
4871 
4872   size_t before = 0;
4873   size_t after = 0;
4874   MetaWord* res;
4875   bool incremented;
4876 
4877   // Each thread increments the HWM at most once. Even if the thread fails to increment
4878   // the HWM, an allocation is still attempted. This is because another thread must then
4879   // have incremented the HWM and therefore the allocation might still succeed.
4880   do {
4881     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4882     res = allocate(word_size, mdtype);
4883   } while (!incremented && res == NULL);
4884 
4885   if (incremented) {
4886     Metaspace::tracer()->report_gc_threshold(before, after,
4887                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4888     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4889   }
4890 
4891   return res;
4892 }
4893 
4894 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4895   if (mdtype == Metaspace::ClassType) {
4896     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4897   } else {
4898     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4899   }
4900 }
4901 
4902 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4903   Metaspace::assert_not_frozen();
4904   if (mdtype == Metaspace::ClassType) {
4905     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4906   } else {
4907     return vsm()->sum_free_in_chunks_in_use();
4908   }
4909 }
4910 
// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made. It does not include space in the global freelist,
// nor the space available in the block dictionary, since that space
// is already counted in some chunk.
4916 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4917   if (mdtype == Metaspace::ClassType) {
4918     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4919   } else {
4920     return vsm()->sum_capacity_in_chunks_in_use();
4921   }
4922 }
4923 
4924 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4925   return used_words_slow(mdtype) * BytesPerWord;
4926 }
4927 
4928 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4929   return capacity_words_slow(mdtype) * BytesPerWord;
4930 }
4931 
4932 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4933   return vsm()->allocated_blocks_bytes() +
4934       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4935 }
4936 
4937 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4938   return vsm()->allocated_chunks_bytes() +
4939       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4940 }
4941 
4942 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4943   Metaspace::assert_not_frozen();
4944   assert(!SafepointSynchronize::is_at_safepoint()
4945          || Thread::current()->is_VM_thread(), "should be the VM thread");
4946 
4947   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4948 
4949   if (is_class && Metaspace::using_class_space()) {
4950     class_vsm()->deallocate(ptr, word_size);
4951   } else {
4952     vsm()->deallocate(ptr, word_size);
4953   }
4954 }
4955 
4956 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4957   assert(Metaspace::using_class_space(), "Has to use class space");
4958   return class_vsm()->calc_chunk_size(word_size);
4959 }
4960 
4961 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4962   // Print both class virtual space counts and metaspace.
4963   if (Verbose) {
4964     vsm()->print_on(out);
4965     if (Metaspace::using_class_space()) {
4966       class_vsm()->print_on(out);
4967     }
4968   }
4969 }
4970 
4971 void ClassLoaderMetaspace::verify() {
4972   vsm()->verify();
4973   if (Metaspace::using_class_space()) {
4974     class_vsm()->verify();
4975   }
4976 }
4977 
4978 void ClassLoaderMetaspace::dump(outputStream* const out) const {
4979   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4980   vsm()->dump(out);
4981   if (Metaspace::using_class_space()) {
4982     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4983     class_vsm()->dump(out);
4984   }
4985 }
4986 
4987 
4988 
4989 #ifdef ASSERT
4990 static void do_verify_chunk(Metachunk* chunk) {
4991   guarantee(chunk != NULL, "Sanity");
  // Verify chunk itself; then verify that it is consistent with the
  // occupancy map of its containing node.
4994   chunk->verify();
4995   VirtualSpaceNode* const vsn = chunk->container();
4996   OccupancyMap* const ocmap = vsn->occupancy_map();
4997   ocmap->verify_for_chunk(chunk);
4998 }
4999 #endif
5000 
5001 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
5002   chunk->set_is_tagged_free(!inuse);
5003   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
5004   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
5005 }
5006 
5007 /////////////// Unit tests ///////////////
5008 
5009 #ifndef PRODUCT
5010 
5011 class TestMetaspaceUtilsTest : AllStatic {
5012  public:
5013   static void test_reserved() {
5014     size_t reserved = MetaspaceUtils::reserved_bytes();
5015 
5016     assert(reserved > 0, "assert");
5017 
5018     size_t committed  = MetaspaceUtils::committed_bytes();
5019     assert(committed <= reserved, "assert");
5020 
5021     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5022     assert(reserved_metadata > 0, "assert");
5023     assert(reserved_metadata <= reserved, "assert");
5024 
5025     if (UseCompressedClassPointers) {
5026       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5027       assert(reserved_class > 0, "assert");
5028       assert(reserved_class < reserved, "assert");
5029     }
5030   }
5031 
5032   static void test_committed() {
5033     size_t committed = MetaspaceUtils::committed_bytes();
5034 
5035     assert(committed > 0, "assert");
5036 
5037     size_t reserved  = MetaspaceUtils::reserved_bytes();
5038     assert(committed <= reserved, "assert");
5039 
5040     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5041     assert(committed_metadata > 0, "assert");
5042     assert(committed_metadata <= committed, "assert");
5043 
5044     if (UseCompressedClassPointers) {
5045       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5046       assert(committed_class > 0, "assert");
5047       assert(committed_class < committed, "assert");
5048     }
5049   }
5050 
5051   static void test_virtual_space_list_large_chunk() {
5052     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5053     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Use a size larger than VirtualSpaceSize (256k), plus an extra page so the
    // size is _not_ vm_allocation_granularity aligned on Windows.
5056     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5057     large_size += (os::vm_page_size()/BytesPerWord);
5058     vs_list->get_new_chunk(large_size, 0);
5059   }
5060 
5061   static void test() {
5062     test_reserved();
5063     test_committed();
5064     test_virtual_space_list_large_chunk();
5065   }
5066 };
5067 
5068 void TestMetaspaceUtils_test() {
5069   TestMetaspaceUtilsTest::test();
5070 }
5071 
5072 class TestVirtualSpaceNodeTest {
5073   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5074                                           size_t& num_small_chunks,
5075                                           size_t& num_specialized_chunks) {
5076     num_medium_chunks = words_left / MediumChunk;
5077     words_left = words_left % MediumChunk;
5078 
5079     num_small_chunks = words_left / SmallChunk;
5080     words_left = words_left % SmallChunk;
5081     // how many specialized chunks can we get?
5082     num_specialized_chunks = words_left / SpecializedChunk;
5083     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5084   }
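  // Worked example (assuming the non-class chunk sizes above: 8K/512/128 words):
  // chunk_up(9344, ...) yields 1 medium chunk (8192 words), 2 small chunks
  // (1024 words) and 1 specialized chunk (128 words), with nothing left over.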
5085 
5086  public:
5087   static void test() {
5088     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5089     const size_t vsn_test_size_words = MediumChunk  * 4;
5090     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5091 
    // The chunk sizes must be multiples of each other, or this will fail
5093     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5094     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5095 
5096     { // No committed memory in VSN
5097       ChunkManager cm(false);
5098       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5099       vsn.initialize();
5100       vsn.retire(&cm);
5101       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5102     }
5103 
5104     { // All of VSN is committed, half is used by chunks
5105       ChunkManager cm(false);
5106       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5107       vsn.initialize();
5108       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5109       vsn.get_chunk_vs(MediumChunk);
5110       vsn.get_chunk_vs(MediumChunk);
5111       vsn.retire(&cm);
5112       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5113       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5114     }
5115 
5116     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5117     // This doesn't work for systems with vm_page_size >= 16K.
5118     if (page_chunks < MediumChunk) {
5119       // 4 pages of VSN is committed, some is used by chunks
5120       ChunkManager cm(false);
5121       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5122 
5123       vsn.initialize();
5124       vsn.expand_by(page_chunks, page_chunks);
5125       vsn.get_chunk_vs(SmallChunk);
5126       vsn.get_chunk_vs(SpecializedChunk);
5127       vsn.retire(&cm);
5128 
5129       // committed - used = words left to retire
5130       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5131 
5132       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5133       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5134 
5135       assert(num_medium_chunks == 0, "should not get any medium chunks");
5136       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5137       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5138     }
5139 
5140     { // Half of VSN is committed, a humongous chunk is used
5141       ChunkManager cm(false);
5142       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5143       vsn.initialize();
5144       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5145       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5146       vsn.retire(&cm);
5147 
5148       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5149       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5150       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5151 
5152       assert(num_medium_chunks == 0, "should not get any medium chunks");
5153       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5154       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5155     }
5156 
5157   }
5158 
5159 #define assert_is_available_positive(word_size) \
5160   assert(vsn.is_available(word_size), \
5161          #word_size ": " PTR_FORMAT " bytes were not available in " \
5162          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5163          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5164 
5165 #define assert_is_available_negative(word_size) \
5166   assert(!vsn.is_available(word_size), \
5167          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5168          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5169          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5170 
5171   static void test_is_available_positive() {
5172     // Reserve some memory.
5173     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5174     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5175 
5176     // Commit some memory.
5177     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5178     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5179     assert(expanded, "Failed to commit");
5180 
5181     // Check that is_available accepts the committed size.
5182     assert_is_available_positive(commit_word_size);
5183 
5184     // Check that is_available accepts half the committed size.
5185     size_t expand_word_size = commit_word_size / 2;
5186     assert_is_available_positive(expand_word_size);
5187   }
5188 
5189   static void test_is_available_negative() {
5190     // Reserve some memory.
5191     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5192     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5193 
5194     // Commit some memory.
5195     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5196     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5197     assert(expanded, "Failed to commit");
5198 
5199     // Check that is_available doesn't accept a too large size.
5200     size_t two_times_commit_word_size = commit_word_size * 2;
5201     assert_is_available_negative(two_times_commit_word_size);
5202   }
5203 
5204   static void test_is_available_overflow() {
5205     // Reserve some memory.
5206     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5207     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5208 
5209     // Commit some memory.
5210     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5211     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5212     assert(expanded, "Failed to commit");
5213 
5214     // Calculate a size that will overflow the virtual space size.
5215     void* virtual_space_max = (void*)(uintptr_t)-1;
5216     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5217     size_t overflow_size = bottom_to_max + BytesPerWord;
5218     size_t overflow_word_size = overflow_size / BytesPerWord;
5219 
5220     // Check that is_available can handle the overflow.
5221     assert_is_available_negative(overflow_word_size);
5222   }
5223 
5224   static void test_is_available() {
5225     TestVirtualSpaceNodeTest::test_is_available_positive();
5226     TestVirtualSpaceNodeTest::test_is_available_negative();
5227     TestVirtualSpaceNodeTest::test_is_available_overflow();
5228   }
5229 };
5230 
5231 // The following test is placed here instead of a gtest / unittest file
5232 // because the ChunkManager class is only available in this file.
5233 void ChunkManager_test_list_index() {
5234   {
5235     // Test previous bug where a query for a humongous class metachunk,
5236     // incorrectly matched the non-class medium metachunk size.
5237     {
5238       ChunkManager manager(true);
5239 
5240       assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5241 
5242       ChunkIndex index = manager.list_index(MediumChunk);
5243 
5244       assert(index == HumongousIndex,
5245           "Requested size is larger than ClassMediumChunk,"
5246           " so should return HumongousIndex. Got index: %d", (int)index);
5247     }
5248 
5249     // Check the specified sizes as well.
5250     {
5251       ChunkManager manager(true);
5252       assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
5253       assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
5254       assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
5255       assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
5256     }
5257     {
5258       ChunkManager manager(false);
5259       assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
5260       assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
5261       assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
5262       assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
5263     }
5264 
5265   }
5266 
5267 }
5268 
5269 #endif // !PRODUCT
5270 
5271 #ifdef ASSERT
5272 
5273 // The following test is placed here instead of a gtest / unittest file
5274 // because the ChunkManager class is only available in this file.
5275 class SpaceManagerTest : AllStatic {
5276   friend void SpaceManager_test_adjust_initial_chunk_size();
5277 
5278   static void test_adjust_initial_chunk_size(bool is_class) {
5279     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5280     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5281     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5282 
5283 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5284     do {                                                                         \
5285       size_t v = value;                                                          \
5286       size_t e = expected;                                                       \
5287       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5288              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5289     } while (0)
5290 
5291     // Smallest (specialized)
5292     test_adjust_initial_chunk_size(1,            smallest, is_class);
5293     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5294     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5295 
5296     // Small
5297     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5298     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5299     test_adjust_initial_chunk_size(normal,       normal, is_class);
5300 
5301     // Medium
5302     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5303     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5304     test_adjust_initial_chunk_size(medium,     medium, is_class);
5305 
5306     // Humongous
5307     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5308 
5309 #undef test_adjust_initial_chunk_size
5310   }
5311 
5312   static void test_adjust_initial_chunk_size() {
5313     test_adjust_initial_chunk_size(false);
5314     test_adjust_initial_chunk_size(true);
5315   }
5316 };
5317 
5318 void SpaceManager_test_adjust_initial_chunk_size() {
5319   SpaceManagerTest::test_adjust_initial_chunk_size();
5320 }
5321 
5322 #endif // ASSERT
5323 
5324 struct chunkmanager_statistics_t {
5325   int num_specialized_chunks;
5326   int num_small_chunks;
5327   int num_medium_chunks;
5328   int num_humongous_chunks;
5329 };
5330 
5331 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5332   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5333   ChunkManager::ChunkManagerStatistics stat;
5334   chunk_manager->get_statistics(&stat);
5335   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5336   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5337   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5338   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5339 }
5340 
5341 struct chunk_geometry_t {
5342   size_t specialized_chunk_word_size;
5343   size_t small_chunk_word_size;
5344   size_t medium_chunk_word_size;
5345 };
5346 
5347 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5348   if (mdType == Metaspace::NonClassType) {
5349     out->specialized_chunk_word_size = SpecializedChunk;
5350     out->small_chunk_word_size = SmallChunk;
5351     out->medium_chunk_word_size = MediumChunk;
5352   } else {
5353     out->specialized_chunk_word_size = ClassSpecializedChunk;
5354     out->small_chunk_word_size = ClassSmallChunk;
5355     out->medium_chunk_word_size = ClassMediumChunk;
5356   }
5357 }
5358