1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
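     // BlockTreeDictionary manages deallocated Metablocks (see BlockFreelist below);
     // ChunkTreeDictionary backs the ChunkManager's dictionary of free humongous chunks.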
  57 
  58 // Helper function that does a bunch of checks for a chunk.
  59 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  60 
  61 // Given a Metachunk, update its in-use information (both in the
  62 // chunk and the occupancy map).
  63 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  64 
  65 size_t const allocation_from_dictionary_limit = 4 * K;
  66 
  67 MetaWord* last_allocated = 0;
  68 
  69 size_t Metaspace::_compressed_class_space_size;
  70 const MetaspaceTracer* Metaspace::_tracer = NULL;
  71 
  72 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  73 
  74 enum ChunkSizes {    // in words.
  75   ClassSpecializedChunk = 128,
  76   SpecializedChunk = 128,
  77   ClassSmallChunk = 256,
  78   SmallChunk = 512,
  79   ClassMediumChunk = 4 * K,
  80   MediumChunk = 8 * K
  81 };
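     // For orientation, assuming 8-byte MetaWords (a 64-bit VM), these word sizes
     // correspond to 1K bytes (specialized), 2K/4K bytes (class/non-class small)
     // and 32K/64K bytes (class/non-class medium) per chunk.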
  82 
  83 // Returns the word size of the given non-humongous chunk type.
  84 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  85   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  86   size_t size = 0;
  87   if (is_class) {
  88     switch(chunktype) {
  89       case SpecializedIndex: size = ClassSpecializedChunk; break;
  90       case SmallIndex: size = ClassSmallChunk; break;
  91       case MediumIndex: size = ClassMediumChunk; break;
  92       default:
  93         ShouldNotReachHere();
  94     }
  95   } else {
  96     switch(chunktype) {
  97       case SpecializedIndex: size = SpecializedChunk; break;
  98       case SmallIndex: size = SmallChunk; break;
  99       case MediumIndex: size = MediumChunk; break;
 100       default:
 101         ShouldNotReachHere();
 102     }
 103   }
 104   return size;
 105 }
 106 
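     // Maps a chunk word size back to its chunk type. For the three fixed sizes this
     // is the inverse of get_size_for_nonhumongous_chunktype(); any larger size that
     // is a multiple of the specialized chunk size is classified as HumongousIndex.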
 107 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 108   if (is_class) {
 109     if (size == ClassSpecializedChunk) {
 110       return SpecializedIndex;
 111     } else if (size == ClassSmallChunk) {
 112       return SmallIndex;
 113     } else if (size == ClassMediumChunk) {
 114       return MediumIndex;
 115     } else if (size > ClassMediumChunk) {
 116       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 117       return HumongousIndex;
 118     }
 119   } else {
 120     if (size == SpecializedChunk) {
 121       return SpecializedIndex;
 122     } else if (size == SmallChunk) {
 123       return SmallIndex;
 124     } else if (size == MediumChunk) {
 125       return MediumIndex;
 126     } else if (size > MediumChunk) {
 127       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 128       return HumongousIndex;
 129     }
 130   }
 131   ShouldNotReachHere();
 132   return (ChunkIndex)-1;
 133 }
 134 
 135 
 136 static ChunkIndex next_chunk_index(ChunkIndex i) {
 137   assert(i < NumberOfInUseLists, "Out of bound");
 138   return (ChunkIndex) (i+1);
 139 }
 140 
 141 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 142   assert(i > ZeroIndex, "Out of bound");
 143   return (ChunkIndex) (i-1);
 144 }
 145 
 146 static const char* scale_unit(size_t scale) {
 147   switch(scale) {
 148     case 1: return "BYTES";
 149     case K: return "KB";
 150     case M: return "MB";
 151     case G: return "GB";
 152     default:
 153       ShouldNotReachHere();
 154       return NULL;
 155   }
 156 }
 157 
 158 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 159 uint MetaspaceGC::_shrink_factor = 0;
 160 bool MetaspaceGC::_should_concurrent_collect = false;
 161 
 162 typedef class FreeList<Metachunk> ChunkList;
 163 
 164 // Manages the global free lists of chunks.
 165 class ChunkManager : public CHeapObj<mtInternal> {
 166   friend class TestVirtualSpaceNodeTest;
 167 
 168   // Free list of chunks of different sizes.
 169   //   SpecializedChunk
 170   //   SmallChunk
 171   //   MediumChunk
 172   ChunkList _free_chunks[NumberOfFreeLists];
 173 
 174   // Whether or not this is the class chunkmanager.
 175   const bool _is_class;
 176 
 177   // Return non-humongous chunk list by its index.
 178   ChunkList* free_chunks(ChunkIndex index);
 179 
 180   // Returns non-humongous chunk list for the given chunk word size.
 181   ChunkList* find_free_chunks_list(size_t word_size);
 182 
 183   //   HumongousChunk
 184   ChunkTreeDictionary _humongous_dictionary;
 185 
 186   // Returns the humongous chunk dictionary.
 187   ChunkTreeDictionary* humongous_dictionary() {
 188     return &_humongous_dictionary;
 189   }
 190 
 191   // Size, in metaspace words, of all chunks managed by this ChunkManager
 192   size_t _free_chunks_total;
 193   // Number of chunks in this ChunkManager
 194   size_t _free_chunks_count;
 195 
 196   // Update counters after a chunk was added or removed.
 197   void account_for_added_chunk(const Metachunk* c);
 198   void account_for_removed_chunk(const Metachunk* c);
 199 
 200   // Debug support
 201 
 202   size_t sum_free_chunks();
 203   size_t sum_free_chunks_count();
 204 
 205   void locked_verify_free_chunks_total();
 206   void slow_locked_verify_free_chunks_total() {
 207     if (VerifyMetaspace) {
 208       locked_verify_free_chunks_total();
 209     }
 210   }
 211   void locked_verify_free_chunks_count();
 212   void slow_locked_verify_free_chunks_count() {
 213     if (VerifyMetaspace) {
 214       locked_verify_free_chunks_count();
 215     }
 216   }
 217   void verify_free_chunks_count();
 218 
 219   // Given a pointer to a chunk, attempts to merge it with neighboring
 220   // free chunks to form a bigger chunk. Returns true if successful.
 221   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 222 
 223   // Helper for chunk merging:
 224   //  Given an address range with 1-n chunks which are all supposed to be
 225   //  free and hence currently managed by this ChunkManager, remove them
 226   //  from this ChunkManager and mark them as invalid.
 227   // - This does not correct the occupancy map.
 228   // - This does not adjust the counters in ChunkManager.
 229   // - This does not adjust the container count in the containing VirtualSpaceNode.
 230   // Returns number of chunks removed.
 231   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 232 
 233   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 234   // split up the larger chunk into n smaller chunks, at least one of which should be
 235   // the target chunk of target chunk size. The smaller chunks, including the target
 236   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 237   // Note that this chunk is supposed to be removed from the freelist right away.
 238   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 239 
 240  public:
 241 
 242   struct ChunkManagerStatistics {
 243     size_t num_by_type[NumberOfFreeLists];
 244     size_t single_size_by_type[NumberOfFreeLists];
 245     size_t total_size_by_type[NumberOfFreeLists];
 246     size_t num_humongous_chunks;
 247     size_t total_size_humongous_chunks;
 248   };
 249 
 250   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 251   void get_statistics(ChunkManagerStatistics* stat) const;
 252   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 253 
 254 
 255   ChunkManager(bool is_class)
 256       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 257     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 258     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 259     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 260   }
 261 
 262   // Allocate (i.e. remove and hand out) a chunk from the global freelist.
 263   Metachunk* chunk_freelist_allocate(size_t word_size);
 264 
 265   // Map a size to a list index assuming that there are lists
 266   // for special, small, medium, and humongous chunks.
 267   ChunkIndex list_index(size_t size);
 268 
 269   // Map a given index to the chunk size.
 270   size_t size_by_index(ChunkIndex index) const;
 271 
 272   bool is_class() const { return _is_class; }
 273 
 274   // Convenience accessors.
 275   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 276   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 277   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 278 
 279   // Take a chunk from the ChunkManager. The chunk is expected to be in
 280   // the chunk manager (the freelist if non-humongous, the dictionary if
 281   // humongous).
 282   void remove_chunk(Metachunk* chunk);
 283 
 284   // Return a single chunk of type index to the ChunkManager.
 285   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 286 
 287   // Add the simple linked list of chunks to the freelist of chunks
 288   // of type index.
 289   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 290 
 291   // Total of the space in the free chunks list
 292   size_t free_chunks_total_words();
 293   size_t free_chunks_total_bytes();
 294 
 295   // Number of chunks in the free chunks list
 296   size_t free_chunks_count();
 297 
 298   // Remove from a list by size.  Selects list based on size of chunk.
 299   Metachunk* free_chunks_get(size_t chunk_word_size);
 300 
 301 #define index_bounds_check(index)                                         \
 302   assert(index == SpecializedIndex ||                                     \
 303          index == SmallIndex ||                                           \
 304          index == MediumIndex ||                                          \
 305          index == HumongousIndex, "Bad index: %d", (int) index)
 306 
 307   size_t num_free_chunks(ChunkIndex index) const {
 308     index_bounds_check(index);
 309 
 310     if (index == HumongousIndex) {
 311       return _humongous_dictionary.total_free_blocks();
 312     }
 313 
 314     ssize_t count = _free_chunks[index].count();
 315     return count == -1 ? 0 : (size_t) count;
 316   }
 317 
 318   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 319     index_bounds_check(index);
 320 
 321     size_t word_size = 0;
 322     if (index == HumongousIndex) {
 323       word_size = _humongous_dictionary.total_size();
 324     } else {
 325       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 326       word_size = size_per_chunk_in_words * num_free_chunks(index);
 327     }
 328 
 329     return word_size * BytesPerWord;
 330   }
 331 
 332   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 333     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 334                                          num_free_chunks(SmallIndex),
 335                                          num_free_chunks(MediumIndex),
 336                                          num_free_chunks(HumongousIndex),
 337                                          size_free_chunks_in_bytes(SpecializedIndex),
 338                                          size_free_chunks_in_bytes(SmallIndex),
 339                                          size_free_chunks_in_bytes(MediumIndex),
 340                                          size_free_chunks_in_bytes(HumongousIndex));
 341   }
 342 
 343   // Debug support
 344   void verify();
 345   void slow_verify() {
 346     if (VerifyMetaspace) {
 347       verify();
 348     }
 349   }
 350   void locked_verify();
 351   void slow_locked_verify() {
 352     if (VerifyMetaspace) {
 353       locked_verify();
 354     }
 355   }
 356   void verify_free_chunks_total();
 357 
 358   void locked_print_free_chunks(outputStream* st);
 359   void locked_print_sum_free_chunks(outputStream* st);
 360 
 361   void print_on(outputStream* st) const;
 362 
 363   // Prints composition for both non-class and (if available)
 364   // class chunk managers.
 365   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 366 };
 367 
 368 class SmallBlocks : public CHeapObj<mtClass> {
 369   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 370   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 371 
 372  private:
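       // _small_lists[k] holds free blocks of exactly (k + _small_block_min_size)
       // words; see list_at() below and the constructor, which sets each list's size.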
 373   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 374 
 375   FreeList<Metablock>& list_at(size_t word_size) {
 376     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 377     return _small_lists[word_size - _small_block_min_size];
 378   }
 379 
 380  public:
 381   SmallBlocks() {
 382     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 383       uint k = i - _small_block_min_size;
 384       _small_lists[k].set_size(i);
 385     }
 386   }
 387 
 388   size_t total_size() const {
 389     size_t result = 0;
 390     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 391       uint k = i - _small_block_min_size;
 392       result = result + _small_lists[k].count() * _small_lists[k].size();
 393     }
 394     return result;
 395   }
 396 
 397   static uint small_block_max_size() { return _small_block_max_size; }
 398   static uint small_block_min_size() { return _small_block_min_size; }
 399 
 400   MetaWord* get_block(size_t word_size) {
 401     if (list_at(word_size).count() > 0) {
 402       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 403       return new_block;
 404     } else {
 405       return NULL;
 406     }
 407   }
 408   void return_block(Metablock* free_chunk, size_t word_size) {
 409     list_at(word_size).return_chunk_at_head(free_chunk, false);
 410     assert(list_at(word_size).count() > 0, "Should have a chunk");
 411   }
 412 
 413   void print_on(outputStream* st) const {
 414     st->print_cr("SmallBlocks:");
 415     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 416       uint k = i - _small_block_min_size;
 417       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 418     }
 419   }
 420 };
 421 
 422 // Used to manage the free list of Metablocks (a block corresponds
 423 // to the allocation of a quantum of metadata).
 424 class BlockFreelist : public CHeapObj<mtClass> {
 425   BlockTreeDictionary* const _dictionary;
 426   SmallBlocks* _small_blocks;
 427 
 428   // Only allocate and split from freelist if the size of the allocation
 429   // is at least 1/4th the size of the available block.
 430   const static int WasteMultiplier = 4;
 431 
 432   // Accessors
 433   BlockTreeDictionary* dictionary() const { return _dictionary; }
 434   SmallBlocks* small_blocks() {
 435     if (_small_blocks == NULL) {
 436       _small_blocks = new SmallBlocks();
 437     }
 438     return _small_blocks;
 439   }
 440 
 441  public:
 442   BlockFreelist();
 443   ~BlockFreelist();
 444 
 445   // Get and return a block to the free list
 446   MetaWord* get_block(size_t word_size);
 447   void return_block(MetaWord* p, size_t word_size);
 448 
 449   size_t total_size() const  {
 450     size_t result = dictionary()->total_size();
 451     if (_small_blocks != NULL) {
 452       result = result + _small_blocks->total_size();
 453     }
 454     return result;
 455   }
 456 
 457   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 458   void print_on(outputStream* st) const;
 459 };
 460 
 461 // Helper for the occupancy bitmap. A type trait that yields an all-bits-set unsigned constant.
 462 template <typename T> struct all_ones  { static const T value; };
 463 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 464 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
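     // Example: set_bits_of_region_T<uint64_t>() below writes either
     // all_ones<uint64_t>::value or 0 to set or clear 64 occupancy-map bits at once.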
 465 
 466 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 467 // keeps information about
 468 // - where a chunk starts
 469 // - whether a chunk is in-use or free
 470 // A bit in this bitmap represents one region of memory the size of the
 471 // smallest chunk type (SpecializedChunk or ClassSpecializedChunk).
 472 class OccupancyMap : public CHeapObj<mtInternal> {
 473 
 474   // The address range this map covers.
 475   const MetaWord* const _reference_address;
 476   const size_t _word_size;
 477 
 478   // The word size of a specialized chunk, aka the number of words one
 479   // bit in this map represents.
 480   const size_t _smallest_chunk_word_size;
 481 
 482   // map data
 483   // Data are organized in two bit layers:
 484   // The first layer is the chunk-start-map. Here, a bit is set to mark
 485   // the corresponding region as the head of a chunk.
 486   // The second layer is the in-use-map. Here, a set bit indicates that
 487   // the corresponding region belongs to a chunk which is in use.
 488   uint8_t* _map[2];
 489 
 490   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 491 
 492   // length, in bytes, of bitmap data
 493   size_t _map_size;
 494 
 495   // Returns true if bit at position pos at bit-layer layer is set.
 496   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 497     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 498     const unsigned byteoffset = pos / 8;
 499     assert(byteoffset < _map_size,
 500            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 501     const unsigned mask = 1 << (pos % 8);
 502     return (_map[layer][byteoffset] & mask) > 0;
 503   }
 504 
 505   // Changes bit at position pos at bit-layer layer to value v.
 506   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 507     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 508     const unsigned byteoffset = pos / 8;
 509     assert(byteoffset < _map_size,
 510            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 511     const unsigned mask = 1 << (pos % 8);
 512     if (v) {
 513       _map[layer][byteoffset] |= mask;
 514     } else {
 515       _map[layer][byteoffset] &= ~mask;
 516     }
 517   }
 518 
 519   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 520   // pos is 32/64 aligned and num_bits is 32/64.
 521   // This is the typical case when coalescing to medium chunks, whose size is
 522   // 32 or 64 times the specialized chunk size (depending on class or non-class
 523   // case), so they occupy 32 or 64 bits which are 32/64-bit aligned, because
 524   // chunks are chunk-size aligned.
 525   template <typename T>
 526   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 527     assert(_map_size > 0, "not initialized");
 528     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 529     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 530     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 531     const size_t byteoffset = pos / 8;
 532     assert(byteoffset <= (_map_size - sizeof(T)),
 533            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 534     const T w = *(T*)(_map[layer] + byteoffset);
 535     return w > 0 ? true : false;
 536   }
 537 
 538   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 539   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 540     if (pos % 32 == 0 && num_bits == 32) {
 541       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 542     } else if (pos % 64 == 0 && num_bits == 64) {
 543       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 544     } else {
 545       for (unsigned n = 0; n < num_bits; n ++) {
 546         if (get_bit_at_position(pos + n, layer)) {
 547           return true;
 548         }
 549       }
 550     }
 551     return false;
 552   }
 553 
 554   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 555   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 556     assert(word_size % _smallest_chunk_word_size == 0,
 557         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 558     const unsigned pos = get_bitpos_for_address(p);
 559     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 560     return is_any_bit_set_in_region(pos, num_bits, layer);
 561   }
 562 
 563   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 564   // pos is 32/64 aligned and num_bits is 32/64.
 565   // This is the typical case when coalescing to medium chunks, whose size
 566   // is 32 or 64 times the specialized chunk size (depending on class or
 567   // non-class case), so they occupy 32 or 64 bits which are 32/64-bit aligned,
 568   // because chunks are chunk-size aligned.
 569   template <typename T>
 570   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 571     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 572            (unsigned)(sizeof(T) * 8), pos);
 573     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 574            num_bits, (unsigned)(sizeof(T) * 8));
 575     const size_t byteoffset = pos / 8;
 576     assert(byteoffset <= (_map_size - sizeof(T)),
 577            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 578     T* const pw = (T*)(_map[layer] + byteoffset);
 579     *pw = v ? all_ones<T>::value : (T) 0;
 580   }
 581 
 582   // Set all bits in a region starting at pos to a value.
 583   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 584     assert(_map_size > 0, "not initialized");
 585     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 586     if (pos % 32 == 0 && num_bits == 32) {
 587       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 588     } else if (pos % 64 == 0 && num_bits == 64) {
 589       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 590     } else {
 591       for (unsigned n = 0; n < num_bits; n ++) {
 592         set_bit_at_position(pos + n, layer, v);
 593       }
 594     }
 595   }
 596 
 597   // Helper: sets all bits in a region [p, p+word_size).
 598   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 599     assert(word_size % _smallest_chunk_word_size == 0,
 600         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 601     const unsigned pos = get_bitpos_for_address(p);
 602     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 603     set_bits_of_region(pos, num_bits, layer, v);
 604   }
 605 
 606   // Helper: given an address, return the bit position representing that address.
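       // (For example, with _smallest_chunk_word_size == 128, an address 256 words
       // past _reference_address maps to bit position 2.)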
 607   unsigned get_bitpos_for_address(const MetaWord* p) const {
 608     assert(_reference_address != NULL, "not initialized");
 609     assert(p >= _reference_address && p < _reference_address + _word_size,
 610            "Address %p out of range for occupancy map [%p..%p).",
 611             p, _reference_address, _reference_address + _word_size);
 612     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 613            "Address not aligned (%p).", p);
 614     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 615     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 616     return (unsigned) d;
 617   }
 618 
 619  public:
 620 
 621   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 622     _reference_address(reference_address), _word_size(word_size),
 623     _smallest_chunk_word_size(smallest_chunk_word_size) {
 624     assert(reference_address != NULL, "invalid reference address");
 625     assert(is_aligned(reference_address, smallest_chunk_word_size),
 626            "Reference address not aligned to smallest chunk size.");
 627     assert(is_aligned(word_size, smallest_chunk_word_size),
 628            "Word_size shall be a multiple of the smallest chunk size.");
 629     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 630     size_t num_bits = word_size / smallest_chunk_word_size;
 631     _map_size = (num_bits + 7) / 8;
 632     assert(_map_size * 8 >= num_bits, "sanity");
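         // Both bit layers are allocated from the C heap, so the total footprint
         // is about 2 * _map_size bytes per VirtualSpaceNode.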
 633     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 634     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 635     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 636     memset(_map[1], 0, _map_size);
 637     memset(_map[0], 0, _map_size);
 638     // Sanity test: the first and last possible chunk start addresses in
 639     // the covered range shall map to the first and last bits in the bitmap.
 640     assert(get_bitpos_for_address(reference_address) == 0,
 641       "First chunk address in range must map to first bit in bitmap.");
 642     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 643       "Last chunk address in range must map to last bit in bitmap.");
 644   }
 645 
 646   ~OccupancyMap() {
 647     os::free(_map[0]);
 648     os::free(_map[1]);
 649   }
 650 
 651   // Returns true if a chunk starts at address p.
 652   bool chunk_starts_at_address(MetaWord* p) const {
 653     const unsigned pos = get_bitpos_for_address(p);
 654     return get_bit_at_position(pos, layer_chunk_start_map);
 655   }
 656 
 657   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 658     const unsigned pos = get_bitpos_for_address(p);
 659     set_bit_at_position(pos, layer_chunk_start_map, v);
 660   }
 661 
 662   // Removes all chunk-start-bits inside a region, typically as a
 663   // result of a chunk merge.
 664   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 665     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 666   }
 667 
 668   // Returns true if there are live (in use) chunks in the region limited
 669   // by [p, p+word_size).
 670   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 671     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 672   }
 673 
 674   // Marks the region starting at p with the size word_size as in use
 675   // or free, depending on v.
 676   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 677     set_bits_of_region(p, word_size, layer_in_use_map, v);
 678   }
 679 
 680 #ifdef ASSERT
 681   // Verify occupancy map for the address range [from, to).
 682   // We need to tell it the address range, because the memory the
 683   // occupancy map is covering may not be fully committed yet.
 684   void verify(MetaWord* from, MetaWord* to) {
 685     Metachunk* chunk = NULL;
 686     int nth_bit_for_chunk = 0;
 687     MetaWord* chunk_end = NULL;
 688     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 689       const unsigned pos = get_bitpos_for_address(p);
 690       // Check the chunk-starts-info:
 691       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 692         // Chunk start marked in bitmap.
 693         chunk = (Metachunk*) p;
 694         if (chunk_end != NULL) {
 695           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 696                  "the next chunk to start at %p).", p, chunk_end);
 697         }
 698         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 699         if (chunk->get_chunk_type() != HumongousIndex) {
 700           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 701         }
 702         chunk_end = p + chunk->word_size();
 703         nth_bit_for_chunk = 0;
 704         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 705       } else {
 706         // No chunk start marked in bitmap.
 707         assert(chunk != NULL, "Chunk should start at start of address range.");
 708         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 709         nth_bit_for_chunk ++;
 710       }
 711       // Check the in-use-info:
 712       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 713       if (in_use_bit) {
 714         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 715                chunk, nth_bit_for_chunk);
 716       } else {
 717         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 718                chunk, nth_bit_for_chunk);
 719       }
 720     }
 721   }
 722 
 723   // Verify that a given chunk is correctly accounted for in the bitmap.
 724   void verify_for_chunk(Metachunk* chunk) {
 725     assert(chunk_starts_at_address((MetaWord*) chunk),
 726            "No chunk start marked in map for chunk %p.", chunk);
 727     // For chunks larger than the minimal chunk size, no other chunk
 728     // must start in its area.
 729     if (chunk->word_size() > _smallest_chunk_word_size) {
 730       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 731                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 732              "No chunk must start within another chunk.");
 733     }
 734     if (!chunk->is_tagged_free()) {
 735       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 736              "Chunk %p is in use but marked as free in map (%d %d).",
 737              chunk, chunk->get_chunk_type(), chunk->get_origin());
 738     } else {
 739       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 740              "Chunk %p is free but marked as in-use in map (%d %d).",
 741              chunk, chunk->get_chunk_type(), chunk->get_origin());
 742     }
 743   }
 744 
 745 #endif // ASSERT
 746 
 747 };
 748 
 749 // A VirtualSpaceList node.
 750 class VirtualSpaceNode : public CHeapObj<mtClass> {
 751   friend class VirtualSpaceList;
 752 
 753   // Link to next VirtualSpaceNode
 754   VirtualSpaceNode* _next;
 755 
 756   // Whether this node belongs to the class metaspace or to the non-class metaspace.
 757   const bool _is_class;
 758 
 759   // total in the VirtualSpace
 760   MemRegion _reserved;
 761   ReservedSpace _rs;
 762   VirtualSpace _virtual_space;
 763   MetaWord* _top;
 764   // count of chunks contained in this VirtualSpace
 765   uintx _container_count;
 766 
 767   OccupancyMap* _occupancy_map;
 768 
 769   // Convenience functions to access the _virtual_space
 770   char* low()  const { return virtual_space()->low(); }
 771   char* high() const { return virtual_space()->high(); }
 772 
 773   // The first Metachunk will be allocated at the bottom of the
 774   // VirtualSpace
 775   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 776 
 777   // Committed but unused space in the virtual space
 778   size_t free_words_in_vs() const;
 779 
 780   // True if this node belongs to class metaspace.
 781   bool is_class() const { return _is_class; }
 782 
 783   // Helper function for take_from_committed: allocate padding chunks
 784   // until top is at the given address.
 785   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 786 
 787  public:
 788 
 789   VirtualSpaceNode(bool is_class, size_t byte_size);
 790   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 791     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 792   ~VirtualSpaceNode();
 793 
 794   // Convenience functions for logical bottom and end
 795   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 796   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 797 
 798   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 799   OccupancyMap* occupancy_map() { return _occupancy_map; }
 800 
 801   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 802 
 803   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 804   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 805 
 806   bool is_pre_committed() const { return _virtual_space.special(); }
 807 
 808   // Note: top() (further below) returns the address of the next available space in _virtual_space.
 809   // Accessors
 810   VirtualSpaceNode* next() { return _next; }
 811   void set_next(VirtualSpaceNode* v) { _next = v; }
 812 
 813   void set_reserved(MemRegion const v) { _reserved = v; }
 814   void set_top(MetaWord* v) { _top = v; }
 815 
 816   // Accessors
 817   MemRegion* reserved() { return &_reserved; }
 818   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 819 
 820   // Returns true if "word_size" is available in the VirtualSpace
 821   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 822 
 823   MetaWord* top() const { return _top; }
 824   void inc_top(size_t word_size) { _top += word_size; }
 825 
 826   uintx container_count() { return _container_count; }
 827   void inc_container_count();
 828   void dec_container_count();
 829 #ifdef ASSERT
 830   uintx container_count_slow();
 831   void verify_container_count();
 832 #endif
 833 
 834   // used and capacity in this single entry in the list
 835   size_t used_words_in_vs() const;
 836   size_t capacity_words_in_vs() const;
 837 
 838   bool initialize();
 839 
 840   // get space from the virtual space
 841   Metachunk* take_from_committed(size_t chunk_word_size);
 842 
 843   // Allocate a chunk from the virtual space and return it.
 844   Metachunk* get_chunk_vs(size_t chunk_word_size);
 845 
 846   // Expands/shrinks the committed space in a virtual space.  Delegates
 847   // to VirtualSpace.
 848   bool expand_by(size_t min_words, size_t preferred_words);
 849 
 850   // In preparation for deleting this node, remove all the chunks
 851   // in the node from any freelist.
 852   void purge(ChunkManager* chunk_manager);
 853 
 854   // If an allocation doesn't fit in the current node a new node is created.
 855   // Allocate chunks out of the remaining committed space in this node
 856   // to avoid wasting that memory.
 857   // This always works out evenly because all the chunk sizes are multiples of
 858   // the smallest chunk size.
 859   void retire(ChunkManager* chunk_manager);
 860 
 861 
 862   void print_on(outputStream* st) const;
 863   void print_map(outputStream* st, bool is_class) const;
 864 
 865   // Debug support
 866   DEBUG_ONLY(void mangle();)
 867   // Verify counters, all chunks in this list node and the occupancy map.
 868   DEBUG_ONLY(void verify();)
 869   // Verify that all free chunks in this node are ideally merged
 870   // (there should not be multiple small chunks where a larger chunk could exist).
 871   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 872 
 873 };
 874 
 875 #define assert_is_aligned(value, alignment)                  \
 876   assert(is_aligned((value), (alignment)),                   \
 877          SIZE_FORMAT_HEX " is not aligned to "               \
 878          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
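     // Example use (see the VirtualSpaceNode constructor below):
     //   assert_is_aligned(bytes, Metaspace::reserve_alignment());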
 879 
 880 // Decide if large pages should be committed when the memory is reserved.
 881 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 882   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 883     size_t words = bytes / BytesPerWord;
 884     bool is_class = false; // We never reserve large pages for the class space.
 885     if (MetaspaceGC::can_expand(words, is_class) &&
 886         MetaspaceGC::allowed_expansion() >= words) {
 887       return true;
 888     }
 889   }
 890 
 891   return false;
 892 }
 893 
 894 // 'bytes' is the byte size of the associated virtual space.
 895 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 896   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 897   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 898   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 899   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 900 
 901   if (_rs.is_reserved()) {
 902     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 903     assert(_rs.size() != 0, "Catch if we get a 0 size");
 904     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 905     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 906 
 907     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 908   }
 909 }
 910 
 911 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 912   DEBUG_ONLY(this->verify();)
 913   Metachunk* chunk = first_chunk();
 914   Metachunk* invalid_chunk = (Metachunk*) top();
 915   while (chunk < invalid_chunk ) {
 916     assert(chunk->is_tagged_free(), "Should be tagged free");
 917     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 918     chunk_manager->remove_chunk(chunk);
 919     chunk->remove_sentinel();
 920     assert(chunk->next() == NULL &&
 921            chunk->prev() == NULL,
 922            "Was not removed from its list");
 923     chunk = (Metachunk*) next;
 924   }
 925 }
 926 
 927 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 928 
 929   if (bottom() == top()) {
 930     return;
 931   }
 932 
 933   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 934   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 935   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 936 
 937   int line_len = 100;
 938   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 939   line_len = (int)(section_len / spec_chunk_size);
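       // Each character column stands for one specialized chunk; line_len is rounded
       // so that every printed line covers a whole number of medium chunks.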
 940 
 941   static const int NUM_LINES = 4;
 942 
 943   char* lines[NUM_LINES];
 944   for (int i = 0; i < NUM_LINES; i ++) {
 945     lines[i] = (char*)os::malloc(line_len, mtInternal);
 946   }
 947   int pos = 0;
 948   const MetaWord* p = bottom();
 949   const Metachunk* chunk = (const Metachunk*)p;
 950   const MetaWord* chunk_end = p + chunk->word_size();
 951   while (p < top()) {
 952     if (pos == line_len) {
 953       pos = 0;
 954       for (int i = 0; i < NUM_LINES; i ++) {
 955         st->fill_to(22);
 956         st->print_raw(lines[i], line_len);
 957         st->cr();
 958       }
 959     }
 960     if (pos == 0) {
 961       st->print(PTR_FORMAT ":", p2i(p));
 962     }
 963     if (p == chunk_end) {
 964       chunk = (Metachunk*)p;
 965       chunk_end = p + chunk->word_size();
 966     }
 967     // Line 1: chunk starting points (a dot if that area is a chunk start).
 968     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 969 
 970     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 971     // chunk is in use.
 972     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 973     if (chunk->word_size() == spec_chunk_size) {
 974       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 975     } else if (chunk->word_size() == small_chunk_size) {
 976       lines[1][pos] = chunk_is_free ? 's' : 'S';
 977     } else if (chunk->word_size() == med_chunk_size) {
 978       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 979     } else if (chunk->word_size() > med_chunk_size) {
 980       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 981     } else {
 982       ShouldNotReachHere();
 983     }
 984 
 985     // Line 3: chunk origin
 986     const ChunkOrigin origin = chunk->get_origin();
 987     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 988 
 989     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 990     //         but were never used.
 991     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 992 
 993     p += spec_chunk_size;
 994     pos ++;
 995   }
 996   if (pos > 0) {
 997     for (int i = 0; i < NUM_LINES; i ++) {
 998       st->fill_to(22);
 999       st->print_raw(lines[i], line_len);
1000       st->cr();
1001     }
1002   }
1003   for (int i = 0; i < NUM_LINES; i ++) {
1004     os::free(lines[i]);
1005   }
1006 }
1007 
1008 
1009 #ifdef ASSERT
1010 uintx VirtualSpaceNode::container_count_slow() {
1011   uintx count = 0;
1012   Metachunk* chunk = first_chunk();
1013   Metachunk* invalid_chunk = (Metachunk*) top();
1014   while (chunk < invalid_chunk ) {
1015     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1016     do_verify_chunk(chunk);
1017     // Don't count the chunks on the free lists.  Those are
1018     // still part of the VirtualSpaceNode but not currently
1019     // counted.
1020     if (!chunk->is_tagged_free()) {
1021       count++;
1022     }
1023     chunk = (Metachunk*) next;
1024   }
1025   return count;
1026 }
1027 #endif
1028 
1029 #ifdef ASSERT
1030 // Verify counters, all chunks in this list node and the occupancy map.
1031 void VirtualSpaceNode::verify() {
1032   uintx num_in_use_chunks = 0;
1033   Metachunk* chunk = first_chunk();
1034   Metachunk* invalid_chunk = (Metachunk*) top();
1035 
1036   // Iterate the chunks in this node and verify each chunk.
1037   while (chunk < invalid_chunk ) {
1038     DEBUG_ONLY(do_verify_chunk(chunk);)
1039     if (!chunk->is_tagged_free()) {
1040       num_in_use_chunks ++;
1041     }
1042     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1043     chunk = (Metachunk*) next;
1044   }
1045   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1046          ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
1047   // Also verify the occupancy map.
1048   occupancy_map()->verify(this->bottom(), this->top());
1049 }
1050 #endif // ASSERT
1051 
1052 #ifdef ASSERT
1053 // Verify that all free chunks in this node are ideally merged
1054 // (there should not be multiple small chunks where a larger chunk could exist).
1055 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1056   Metachunk* chunk = first_chunk();
1057   Metachunk* invalid_chunk = (Metachunk*) top();
1058   // Shorthands.
1059   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1060   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1061   int num_free_chunks_since_last_med_boundary = -1;
1062   int num_free_chunks_since_last_small_boundary = -1;
1063   while (chunk < invalid_chunk ) {
1064     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1065     // Reset the counter when encountering a non-free chunk.
1066     if (chunk->get_chunk_type() != HumongousIndex) {
1067       if (chunk->is_tagged_free()) {
1068         // Count successive free, non-humongous chunks.
1069         if (is_aligned(chunk, size_small)) {
1070           assert(num_free_chunks_since_last_small_boundary <= 1,
1071                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1072           num_free_chunks_since_last_small_boundary = 0;
1073         } else if (num_free_chunks_since_last_small_boundary != -1) {
1074           num_free_chunks_since_last_small_boundary ++;
1075         }
1076         if (is_aligned(chunk, size_med)) {
1077           assert(num_free_chunks_since_last_med_boundary <= 1,
1078                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1079           num_free_chunks_since_last_med_boundary = 0;
1080         } else if (num_free_chunks_since_last_med_boundary != -1) {
1081           num_free_chunks_since_last_med_boundary ++;
1082         }
1083       } else {
1084         // Encountering a non-free chunk, reset counters.
1085         num_free_chunks_since_last_med_boundary = -1;
1086         num_free_chunks_since_last_small_boundary = -1;
1087       }
1088     } else {
1089       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1090       num_free_chunks_since_last_med_boundary = -1;
1091       num_free_chunks_since_last_small_boundary = -1;
1092     }
1093 
1094     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1095     chunk = (Metachunk*) next;
1096   }
1097 }
1098 #endif // ASSERT
1099 
1100 // List of VirtualSpaces for metadata allocation.
1101 class VirtualSpaceList : public CHeapObj<mtClass> {
1102   friend class VirtualSpaceNode;
1103 
1104   enum VirtualSpaceSizes {
1105     VirtualSpaceSize = 256 * K
1106   };
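       // 256 K words per node, i.e. 2 MB assuming 8-byte MetaWords.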
1107 
1108   // Head of the list
1109   VirtualSpaceNode* _virtual_space_list;
1110   // virtual space currently being used for allocations
1111   VirtualSpaceNode* _current_virtual_space;
1112 
1113   // Is this VirtualSpaceList used for the compressed class space
1114   bool _is_class;
1115 
1116   // Sum of reserved and committed memory in the virtual spaces
1117   size_t _reserved_words;
1118   size_t _committed_words;
1119 
1120   // Number of virtual spaces
1121   size_t _virtual_space_count;
1122 
1123   ~VirtualSpaceList();
1124 
1125   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1126 
1127   void set_virtual_space_list(VirtualSpaceNode* v) {
1128     _virtual_space_list = v;
1129   }
1130   void set_current_virtual_space(VirtualSpaceNode* v) {
1131     _current_virtual_space = v;
1132   }
1133 
1134   void link_vs(VirtualSpaceNode* new_entry);
1135 
1136   // Get another virtual space and add it to the list.  This
1137   // is typically prompted by a failed attempt to allocate a chunk
1138   // and is typically followed by the allocation of a chunk.
1139   bool create_new_virtual_space(size_t vs_word_size);
1140 
1141   // Chunk up the unused committed space in the current
1142   // virtual space and add the chunks to the free list.
1143   void retire_current_virtual_space();
1144 
1145  public:
1146   VirtualSpaceList(size_t word_size);
1147   VirtualSpaceList(ReservedSpace rs);
1148 
1149   size_t free_bytes();
1150 
1151   Metachunk* get_new_chunk(size_t chunk_word_size,
1152                            size_t suggested_commit_granularity);
1153 
1154   bool expand_node_by(VirtualSpaceNode* node,
1155                       size_t min_words,
1156                       size_t preferred_words);
1157 
1158   bool expand_by(size_t min_words,
1159                  size_t preferred_words);
1160 
1161   VirtualSpaceNode* current_virtual_space() {
1162     return _current_virtual_space;
1163   }
1164 
1165   bool is_class() const { return _is_class; }
1166 
1167   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1168 
1169   size_t reserved_words()  { return _reserved_words; }
1170   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1171   size_t committed_words() { return _committed_words; }
1172   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1173 
1174   void inc_reserved_words(size_t v);
1175   void dec_reserved_words(size_t v);
1176   void inc_committed_words(size_t v);
1177   void dec_committed_words(size_t v);
1178   void inc_virtual_space_count();
1179   void dec_virtual_space_count();
1180 
1181   bool contains(const void* ptr);
1182 
1183   // Unlink empty VirtualSpaceNodes and free them.
1184   void purge(ChunkManager* chunk_manager);
1185 
1186   void print_on(outputStream* st) const;
1187   void print_map(outputStream* st) const;
1188 
1189   class VirtualSpaceListIterator : public StackObj {
1190     VirtualSpaceNode* _virtual_spaces;
1191    public:
1192     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1193       _virtual_spaces(virtual_spaces) {}
1194 
1195     bool repeat() {
1196       return _virtual_spaces != NULL;
1197     }
1198 
1199     VirtualSpaceNode* get_next() {
1200       VirtualSpaceNode* result = _virtual_spaces;
1201       if (_virtual_spaces != NULL) {
1202         _virtual_spaces = _virtual_spaces->next();
1203       }
1204       return result;
1205     }
1206   };
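
       // Minimal usage sketch for the iterator above:
       //   VirtualSpaceListIterator iter(virtual_space_list());
       //   while (iter.repeat()) {
       //     VirtualSpaceNode* node = iter.get_next();
       //     ...
       //   }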
1207 };
1208 
1209 class Metadebug : AllStatic {
1210   // Debugging support for Metaspaces
1211   static int _allocation_fail_alot_count;
1212 
1213  public:
1214 
1215   static void init_allocation_fail_alot_count();
1216 #ifdef ASSERT
1217   static bool test_metadata_failure();
1218 #endif
1219 };
1220 
1221 int Metadebug::_allocation_fail_alot_count = 0;
1222 
1223 //  SpaceManager - used by Metaspace to handle allocations
1224 class SpaceManager : public CHeapObj<mtClass> {
1225   friend class ClassLoaderMetaspace;
1226   friend class Metadebug;
1227 
1228  private:
1229 
1230   // protects allocations
1231   Mutex* const _lock;
1232 
1233   // Type of metadata allocated.
1234   const Metaspace::MetadataType   _mdtype;
1235 
1236   // Type of metaspace
1237   const Metaspace::MetaspaceType  _space_type;
1238 
1239   // List of chunks in use by this SpaceManager.  Allocations
1240   // are done from the current chunk.  The list is used for deallocating
1241   // chunks when the SpaceManager is freed.
1242   Metachunk* _chunks_in_use[NumberOfInUseLists];
1243   Metachunk* _current_chunk;
1244 
1245   // Maximum number of small chunks to allocate to a SpaceManager
1246   static uint const _small_chunk_limit;
1247 
1248   // Maximum number of specialized chunks to allocate for anonymous and delegating
1249   // metadata space to a SpaceManager
1250   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1251 
1252   // Sum of all space in allocated chunks
1253   size_t _allocated_blocks_words;
1254 
1255   // Sum of all allocated chunks
1256   size_t _allocated_chunks_words;
1257   size_t _allocated_chunks_count;
1258 
1259   // Free lists of blocks are per SpaceManager since they
1260   // are assumed to be in chunks in use by the SpaceManager
1261   // and all chunks in use by a SpaceManager are freed when
1262   // the class loader using the SpaceManager is collected.
1263   BlockFreelist* _block_freelists;
1264 
1265   // protects virtual space and chunk expansions
1266   static const char*  _expand_lock_name;
1267   static const int    _expand_lock_rank;
1268   static Mutex* const _expand_lock;
1269 
1270  private:
1271   // Accessors
1272   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1273   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1274     _chunks_in_use[index] = v;
1275   }
1276 
1277   BlockFreelist* block_freelists() const { return _block_freelists; }
1278 
1279   Metaspace::MetadataType mdtype() { return _mdtype; }
1280 
1281   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1282   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1283 
1284   Metachunk* current_chunk() const { return _current_chunk; }
1285   void set_current_chunk(Metachunk* v) {
1286     _current_chunk = v;
1287   }
1288 
1289   Metachunk* find_current_chunk(size_t word_size);
1290 
1291   // Add chunk to the list of chunks in use
1292   void add_chunk(Metachunk* v, bool make_current);
1293   void retire_current_chunk();
1294 
1295   Mutex* lock() const { return _lock; }
1296 
1297  protected:
1298   void initialize();
1299 
1300  public:
1301   SpaceManager(Metaspace::MetadataType mdtype,
1302                Metaspace::MetaspaceType space_type,
1303                Mutex* lock);
1304   ~SpaceManager();
1305 
1306   enum ChunkMultiples {
1307     MediumChunkMultiple = 4
1308   };
1309 
1310   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1311   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1312   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1313 
1314   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1315 
1316   // Accessors
1317   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1318 
1319   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1320   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1321   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1322 
1323   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1324 
1325   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1326 
1327   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1328   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1329   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1330   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1331   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1332 
1333   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1334 
1335   static Mutex* expand_lock() { return _expand_lock; }
1336 
1337   // Increment the per Metaspace and global running sums for Metachunks
1338   // by the given size.  This is used when a Metachunk is added to
1339   // the in-use list.
1340   void inc_size_metrics(size_t words);
1341   // Increment the per Metaspace and global running sums for Metablocks by the given
1342   // size.  This is used when a Metablock is allocated.
1343   void inc_used_metrics(size_t words);
1344   // Delete the portion of the running sums for this SpaceManager. That is,
1345   // the global running sums for the Metachunks and Metablocks are
1346   // decremented for all the Metachunks in-use by this SpaceManager.
1347   void dec_total_from_size_metrics();
1348 
1349   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1350   // or return the unadjusted size if the requested size is humongous.
1351   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1352   size_t adjust_initial_chunk_size(size_t requested) const;
1353 
  // Get the initial chunk size for this metaspace type.
1355   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1356 
1357   size_t sum_capacity_in_chunks_in_use() const;
1358   size_t sum_used_in_chunks_in_use() const;
1359   size_t sum_free_in_chunks_in_use() const;
1360   size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1362 
1363   size_t sum_count_in_chunks_in_use();
1364   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1365 
1366   Metachunk* get_new_chunk(size_t chunk_word_size);
1367 
1368   // Block allocation and deallocation.
1369   // Allocates a block from the current chunk
1370   MetaWord* allocate(size_t word_size);
1371 
1372   // Helper for allocations
1373   MetaWord* allocate_work(size_t word_size);
1374 
  // Returns a block to the per-manager freelist.
1376   void deallocate(MetaWord* p, size_t word_size);
1377 
  // Based on the allocation size and a minimum chunk size, calculate
  // the chunk size to return (for expanding space for chunk allocation).
1380   size_t calc_chunk_size(size_t allocation_word_size);
1381 
1382   // Called when an allocation from the current chunk fails.
1383   // Gets a new chunk (may require getting a new virtual space),
1384   // and allocates from that chunk.
1385   MetaWord* grow_and_allocate(size_t word_size);
1386 
  // Report memory usage to MemoryService.
1388   void track_metaspace_memory_usage();
1389 
1390   // debugging support.
1391 
1392   void dump(outputStream* const out) const;
1393   void print_on(outputStream* st) const;
1394   void locked_print_chunks_in_use_on(outputStream* st) const;
1395 
1396   void verify();
1397   void verify_chunk_size(Metachunk* chunk);
1398 #ifdef ASSERT
1399   void verify_allocated_blocks_words();
1400 #endif
1401 
  // This adjusts the given size to be at least the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
1404   size_t get_allocation_word_size(size_t word_size) {
1405     size_t byte_size = word_size * BytesPerWord;
1406 
1407     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1408     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1409 
1410     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1411     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1412 
1413     return raw_word_size;
1414   }
1415 };
1416 
1417 uint const SpaceManager::_small_chunk_limit = 4;
1418 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1419 
1420 const char* SpaceManager::_expand_lock_name =
1421   "SpaceManager chunk allocation lock";
1422 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
1423 Mutex* const SpaceManager::_expand_lock =
1424   new Mutex(SpaceManager::_expand_lock_rank,
1425             SpaceManager::_expand_lock_name,
1426             Mutex::_allow_vm_block_flag,
1427             Monitor::_safepoint_check_never);
1428 
1429 void VirtualSpaceNode::inc_container_count() {
1430   assert_lock_strong(SpaceManager::expand_lock());
1431   _container_count++;
1432 }
1433 
1434 void VirtualSpaceNode::dec_container_count() {
1435   assert_lock_strong(SpaceManager::expand_lock());
1436   _container_count--;
1437 }
1438 
1439 #ifdef ASSERT
1440 void VirtualSpaceNode::verify_container_count() {
1441   assert(_container_count == container_count_slow(),
1442          "Inconsistency in container_count _container_count " UINTX_FORMAT
1443          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1444 }
1445 #endif
1446 
1447 // BlockFreelist methods
1448 
1449 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1450 
1451 BlockFreelist::~BlockFreelist() {
1452   delete _dictionary;
1453   if (_small_blocks != NULL) {
1454     delete _small_blocks;
1455   }
1456 }
1457 
1458 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1459   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1460 
1461   Metablock* free_chunk = ::new (p) Metablock(word_size);
1462   if (word_size < SmallBlocks::small_block_max_size()) {
1463     small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
1467   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1468             SIZE_FORMAT, p2i(free_chunk), word_size);
1469 }
1470 
1471 MetaWord* BlockFreelist::get_block(size_t word_size) {
1472   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1473 
1474   // Try small_blocks first.
1475   if (word_size < SmallBlocks::small_block_max_size()) {
1476     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1477     // this space manager.
1478     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1479     if (new_block != NULL) {
1480       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1481               p2i(new_block), word_size);
1482       return new_block;
1483     }
1484   }
1485 
1486   if (word_size < BlockFreelist::min_dictionary_size()) {
1487     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1488     return NULL;
1489   }
1490 
1491   Metablock* free_block = dictionary()->get_chunk(word_size);
1492   if (free_block == NULL) {
1493     return NULL;
1494   }
1495 
1496   const size_t block_size = free_block->size();
1497   if (block_size > WasteMultiplier * word_size) {
1498     return_block((MetaWord*)free_block, block_size);
1499     return NULL;
1500   }
1501 
1502   MetaWord* new_block = (MetaWord*)free_block;
1503   assert(block_size >= word_size, "Incorrect size of block from freelist");
1504   const size_t unused = block_size - word_size;
1505   if (unused >= SmallBlocks::small_block_min_size()) {
1506     return_block(new_block + word_size, unused);
1507   }
1508 
1509   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1510             p2i(new_block), word_size);
1511   return new_block;
1512 }
1513 
1514 void BlockFreelist::print_on(outputStream* st) const {
1515   dictionary()->print_free_lists(st);
1516   if (_small_blocks != NULL) {
1517     _small_blocks->print_on(st);
1518   }
1519 }
1520 
1521 // VirtualSpaceNode methods
1522 
1523 VirtualSpaceNode::~VirtualSpaceNode() {
1524   _rs.release();
1525   if (_occupancy_map != NULL) {
1526     delete _occupancy_map;
1527   }
1528 #ifdef ASSERT
1529   size_t word_size = sizeof(*this) / BytesPerWord;
1530   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1531 #endif
1532 }
1533 
1534 size_t VirtualSpaceNode::used_words_in_vs() const {
1535   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1536 }
1537 
1538 // Space committed in the VirtualSpace
1539 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1540   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1541 }
1542 
1543 size_t VirtualSpaceNode::free_words_in_vs() const {
1544   return pointer_delta(end(), top(), sizeof(MetaWord));
1545 }
1546 
1547 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1548 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1549 
1550   assert(target_top > top(), "Sanity");
1551 
1552   // Padding chunks are added to the freelist.
1553   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1554 
1555   // shorthands
1556   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1557   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1558   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1559 
1560   while (top() < target_top) {
1561 
    // We could make this code more generic, but right now we only deal with two possible chunk sizes
1563     // for padding chunks, so it is not worth it.
1564     size_t padding_chunk_word_size = small_word_size;
    if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
1566       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1567       padding_chunk_word_size = spec_word_size;
1568     }
1569     MetaWord* here = top();
1570     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1571     inc_top(padding_chunk_word_size);
1572 
1573     // Create new padding chunk.
1574     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1575     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1576 
1577     Metachunk* const padding_chunk =
1578       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1579     assert(padding_chunk == (Metachunk*)here, "Sanity");
1580     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1581     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1582                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1583                                        (is_class() ? "class space " : "metaspace"),
1584                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1585 
1586     // Mark chunk start in occupancy map.
1587     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1588 
    // Chunks are born as in-use (see Metachunk ctor). So, before returning
1590     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1591     // will assert that).
1592     do_update_in_use_info_for_chunk(padding_chunk, true);
1593 
1594     // Return Chunk to freelist.
1595     inc_container_count();
1596     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1597     // Please note: at this point, ChunkManager::return_single_chunk()
1598     // may already have merged the padding chunk with neighboring chunks, so
1599     // it may have vanished at this point. Do not reference the padding
1600     // chunk beyond this point.
1601   }
1602 
1603   assert(top() == target_top, "Sanity");
1604 
1605 } // allocate_padding_chunks_until_top_is_at()
1606 
1607 // Allocates the chunk from the virtual space only.
1608 // This interface is also used internally for debugging.  Not all
1609 // chunks removed here are necessarily used for allocation.
1610 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1611   // Non-humongous chunks are to be allocated aligned to their chunk
1612   // size. So, start addresses of medium chunks are aligned to medium
1613   // chunk size, those of small chunks to small chunk size and so
1614   // forth. This facilitates merging of free chunks and reduces
1615   // fragmentation. Chunk sizes are spec < small < medium, with each
1616   // larger chunk size being a multiple of the next smaller chunk
1617   // size.
  // Because of this alignment, we may need to create a number of padding
1619   // chunks. These chunks are created and added to the freelist.
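  //
  // Illustrative example, assuming for illustration a small chunk size of 512
  // words and a medium chunk size of 8K words (non-class space): if a medium
  // chunk is requested while top() sits 512 words past an 8K-word boundary,
  // padding chunks covering the remaining 8K - 512 words are created and
  // returned to the freelist first, so that the medium chunk itself starts on
  // an 8K-word boundary.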
1620 
1621   // The chunk manager to which we will give our padding chunks.
1622   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1623 
1624   // shorthands
1625   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1626   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1627   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1628 
1629   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1630          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1631 
1632   // Chunk alignment (in bytes) == chunk size unless humongous.
1633   // Humongous chunks are aligned to the smallest chunk size (spec).
1634   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1635                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1636 
1637   // Do we have enough space to create the requested chunk plus
1638   // any padding chunks needed?
1639   MetaWord* const next_aligned =
1640     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1641   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1642     return NULL;
1643   }
1644 
1645   // Before allocating the requested chunk, allocate padding chunks if necessary.
1646   // We only need to do this for small or medium chunks: specialized chunks are the
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1648   // (implicitly, also aligned to smallest chunk size).
1649   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1650     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1651         (is_class() ? "class space " : "metaspace"),
1652         top(), next_aligned);
1653     allocate_padding_chunks_until_top_is_at(next_aligned);
1654     // Now, top should be aligned correctly.
1655     assert_is_aligned(top(), required_chunk_alignment);
1656   }
1657 
1658   // Now, top should be aligned correctly.
1659   assert_is_aligned(top(), required_chunk_alignment);
1660 
1661   // Bottom of the new chunk
1662   MetaWord* chunk_limit = top();
1663   assert(chunk_limit != NULL, "Not safe to call this method");
1664 
1665   // The virtual spaces are always expanded by the
1666   // commit granularity to enforce the following condition.
1667   // Without this the is_available check will not work correctly.
1668   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1669       "The committed memory doesn't match the expanded memory.");
1670 
1671   if (!is_available(chunk_word_size)) {
1672     LogTarget(Debug, gc, metaspace, freelist) lt;
1673     if (lt.is_enabled()) {
1674       LogStream ls(lt);
1675       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1676       // Dump some information about the virtual space that is nearly full
1677       print_on(&ls);
1678     }
1679     return NULL;
1680   }
1681 
  // Take the space (bump top on the current virtual space).
1683   inc_top(chunk_word_size);
1684 
1685   // Initialize the chunk
1686   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1687   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1688   assert(result == (Metachunk*)chunk_limit, "Sanity");
1689   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1690   do_update_in_use_info_for_chunk(result, true);
1691 
1692   inc_container_count();
1693 
1694   if (VerifyMetaspace) {
1695     DEBUG_ONLY(chunk_manager->locked_verify());
1696     DEBUG_ONLY(this->verify());
1697   }
1698 
1699   DEBUG_ONLY(do_verify_chunk(result));
1700 
1701   result->inc_use_count();
1702 
1703   return result;
1704 }
1705 
1706 
1707 // Expand the virtual space (commit more of the reserved space)
1708 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1709   size_t min_bytes = min_words * BytesPerWord;
1710   size_t preferred_bytes = preferred_words * BytesPerWord;
1711 
1712   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1713 
1714   if (uncommitted < min_bytes) {
1715     return false;
1716   }
1717 
1718   size_t commit = MIN2(preferred_bytes, uncommitted);
1719   bool result = virtual_space()->expand_by(commit, false);
1720 
1721   if (result) {
1722     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1723               (is_class() ? "class" : "non-class"), commit);
1724   } else {
1725     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1726               (is_class() ? "class" : "non-class"), commit);
1727   }
1728 
1729   assert(result, "Failed to commit memory");
1730 
1731   return result;
1732 }
1733 
1734 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1735   assert_lock_strong(SpaceManager::expand_lock());
1736   Metachunk* result = take_from_committed(chunk_word_size);
1737   return result;
1738 }
1739 
1740 bool VirtualSpaceNode::initialize() {
1741 
1742   if (!_rs.is_reserved()) {
1743     return false;
1744   }
1745 
  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
1749   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1750   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1751 
1752   // ReservedSpaces marked as special will have the entire memory
1753   // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
1755   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1756 
1757   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1758                                             Metaspace::commit_alignment());
1759   if (result) {
1760     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1761         "Checking that the pre-committed memory was registered by the VirtualSpace");
1762 
1763     set_top((MetaWord*)virtual_space()->low());
1764     set_reserved(MemRegion((HeapWord*)_rs.base(),
1765                  (HeapWord*)(_rs.base() + _rs.size())));
1766 
1767     assert(reserved()->start() == (HeapWord*) _rs.base(),
1768            "Reserved start was not set properly " PTR_FORMAT
1769            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1770     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1771            "Reserved size was not set properly " SIZE_FORMAT
1772            " != " SIZE_FORMAT, reserved()->word_size(),
1773            _rs.size() / BytesPerWord);
1774   }
1775 
1776   // Initialize Occupancy Map.
1777   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1778   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1779 
1780   return result;
1781 }
1782 
1783 void VirtualSpaceNode::print_on(outputStream* st) const {
1784   size_t used = used_words_in_vs();
1785   size_t capacity = capacity_words_in_vs();
1786   VirtualSpace* vs = virtual_space();
1787   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1788            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1789            PTR_FORMAT ", " PTR_FORMAT ")",
1790            p2i(vs), capacity / K,
1791            capacity == 0 ? 0 : used * 100 / capacity,
1792            p2i(bottom()), p2i(top()), p2i(end()),
1793            p2i(vs->high_boundary()));
1794 }
1795 
1796 #ifdef ASSERT
1797 void VirtualSpaceNode::mangle() {
1798   size_t word_size = capacity_words_in_vs();
1799   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1800 }
1801 #endif // ASSERT
1802 
1803 // VirtualSpaceList methods
1804 // Space allocated from the VirtualSpace
1805 
1806 VirtualSpaceList::~VirtualSpaceList() {
1807   VirtualSpaceListIterator iter(virtual_space_list());
1808   while (iter.repeat()) {
1809     VirtualSpaceNode* vsl = iter.get_next();
1810     delete vsl;
1811   }
1812 }
1813 
1814 void VirtualSpaceList::inc_reserved_words(size_t v) {
1815   assert_lock_strong(SpaceManager::expand_lock());
1816   _reserved_words = _reserved_words + v;
1817 }
1818 void VirtualSpaceList::dec_reserved_words(size_t v) {
1819   assert_lock_strong(SpaceManager::expand_lock());
1820   _reserved_words = _reserved_words - v;
1821 }
1822 
1823 #define assert_committed_below_limit()                        \
1824   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1825          "Too much committed memory. Committed: " SIZE_FORMAT \
1826          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1827           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1828 
1829 void VirtualSpaceList::inc_committed_words(size_t v) {
1830   assert_lock_strong(SpaceManager::expand_lock());
1831   _committed_words = _committed_words + v;
1832 
1833   assert_committed_below_limit();
1834 }
1835 void VirtualSpaceList::dec_committed_words(size_t v) {
1836   assert_lock_strong(SpaceManager::expand_lock());
1837   _committed_words = _committed_words - v;
1838 
1839   assert_committed_below_limit();
1840 }
1841 
1842 void VirtualSpaceList::inc_virtual_space_count() {
1843   assert_lock_strong(SpaceManager::expand_lock());
1844   _virtual_space_count++;
1845 }
1846 void VirtualSpaceList::dec_virtual_space_count() {
1847   assert_lock_strong(SpaceManager::expand_lock());
1848   _virtual_space_count--;
1849 }
1850 
1851 void ChunkManager::remove_chunk(Metachunk* chunk) {
1852   size_t word_size = chunk->word_size();
1853   ChunkIndex index = list_index(word_size);
1854   if (index != HumongousIndex) {
1855     free_chunks(index)->remove_chunk(chunk);
1856   } else {
1857     humongous_dictionary()->remove_chunk(chunk);
1858   }
1859 
1860   // Chunk has been removed from the chunks free list, update counters.
1861   account_for_removed_chunk(chunk);
1862 }
1863 
1864 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1865   assert_lock_strong(SpaceManager::expand_lock());
1866   assert(chunk != NULL, "invalid chunk pointer");
1867   // Check for valid merge combinations.
1868   assert((chunk->get_chunk_type() == SpecializedIndex &&
1869           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1870          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1871         "Invalid chunk merge combination.");
1872 
1873   const size_t target_chunk_word_size =
1874     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1875 
1876   // [ prospective merge region )
1877   MetaWord* const p_merge_region_start =
1878     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1879   MetaWord* const p_merge_region_end =
1880     p_merge_region_start + target_chunk_word_size;
1881 
1882   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1883   VirtualSpaceNode* const vsn = chunk->container();
1884   OccupancyMap* const ocmap = vsn->occupancy_map();
1885 
1886   // The prospective chunk merge range must be completely contained by the
1887   // committed range of the virtual space node.
1888   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1889     return false;
1890   }
1891 
1892   // Only attempt to merge this range if at its start a chunk starts and at its end
1893   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1894   // of that range, we cannot merge.
1895   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1896     return false;
1897   }
1898   if (p_merge_region_end < vsn->top() &&
1899       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1900     return false;
1901   }
1902 
1903   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1904   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1905     return false;
1906   }
1907 
1908   // Success! Remove all chunks in this region...
1909   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1910     (is_class() ? "class space" : "metaspace"),
1911     p_merge_region_start, p_merge_region_end);
1912 
1913   const int num_chunks_removed =
1914     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1915 
1916   // ... and create a single new bigger chunk.
1917   Metachunk* const p_new_chunk =
1918       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1919   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1920   p_new_chunk->set_origin(origin_merge);
1921 
1922   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1923     (is_class() ? "class space" : "metaspace"),
1924     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1925 
1926   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1927   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1928   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1929 
1930   // Mark chunk as free. Note: it is not necessary to update the occupancy
1931   // map in-use map, because the old chunks were also free, so nothing
1932   // should have changed.
1933   p_new_chunk->set_is_tagged_free(true);
1934 
1935   // Add new chunk to its freelist.
1936   ChunkList* const list = free_chunks(target_chunk_type);
1937   list->return_chunk_at_head(p_new_chunk);
1938 
  // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
  // should not have changed, because the size of the space should be the same).
1941   _free_chunks_count -= num_chunks_removed;
1942   _free_chunks_count ++;
1943 
1944   // VirtualSpaceNode::container_count does not have to be modified:
1945   // it means "number of active (non-free) chunks", so merging free chunks
1946   // should not affect that count.
1947 
1948   // At the end of a chunk merge, run verification tests.
1949   if (VerifyMetaspace) {
1950     DEBUG_ONLY(this->locked_verify());
1951     DEBUG_ONLY(vsn->verify());
1952   }
1953 
1954   return true;
1955 }
1956 
1957 // Remove all chunks in the given area - the chunks are supposed to be free -
1958 // from their corresponding freelists. Mark them as invalid.
1959 // - This does not correct the occupancy map.
1960 // - This does not adjust the counters in ChunkManager.
1961 // - Does not adjust container count counter in containing VirtualSpaceNode
1962 // Returns number of chunks removed.
1963 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1964   assert(p != NULL && word_size > 0, "Invalid range.");
1965   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1966   assert_is_aligned(word_size, smallest_chunk_size);
1967 
1968   Metachunk* const start = (Metachunk*) p;
1969   const Metachunk* const end = (Metachunk*)(p + word_size);
1970   Metachunk* cur = start;
1971   int num_removed = 0;
1972   while (cur < end) {
1973     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1974     DEBUG_ONLY(do_verify_chunk(cur));
1975     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1976     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1977     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1978       (is_class() ? "class space" : "metaspace"),
1979       cur, cur->word_size() * sizeof(MetaWord));
1980     cur->remove_sentinel();
1981     // Note: cannot call ChunkManager::remove_chunk, because that
1982     // modifies the counters in ChunkManager, which we do not want. So
1983     // we call remove_chunk on the freelist directly (see also the
1984     // splitting function which does the same).
1985     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1986     list->remove_chunk(cur);
1987     num_removed ++;
1988     cur = next;
1989   }
1990   return num_removed;
1991 }
1992 
1993 // Walk the list of VirtualSpaceNodes and delete
1994 // nodes with a 0 container_count.  Remove Metachunks in
1995 // the node from their respective freelists.
1996 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1997   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1998   assert_lock_strong(SpaceManager::expand_lock());
1999   // Don't use a VirtualSpaceListIterator because this
2000   // list is being changed and a straightforward use of an iterator is not safe.
2001   VirtualSpaceNode* purged_vsl = NULL;
2002   VirtualSpaceNode* prev_vsl = virtual_space_list();
2003   VirtualSpaceNode* next_vsl = prev_vsl;
2004   while (next_vsl != NULL) {
2005     VirtualSpaceNode* vsl = next_vsl;
2006     DEBUG_ONLY(vsl->verify_container_count();)
2007     next_vsl = vsl->next();
2008     // Don't free the current virtual space since it will likely
2009     // be needed soon.
2010     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2011       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2012                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2013       // Unlink it from the list
2014       if (prev_vsl == vsl) {
2015         // This is the case of the current node being the first node.
2016         assert(vsl == virtual_space_list(), "Expected to be the first node");
2017         set_virtual_space_list(vsl->next());
2018       } else {
2019         prev_vsl->set_next(vsl->next());
2020       }
2021 
2022       vsl->purge(chunk_manager);
2023       dec_reserved_words(vsl->reserved_words());
2024       dec_committed_words(vsl->committed_words());
2025       dec_virtual_space_count();
2026       purged_vsl = vsl;
2027       delete vsl;
2028     } else {
2029       prev_vsl = vsl;
2030     }
2031   }
2032 #ifdef ASSERT
2033   if (purged_vsl != NULL) {
2034     // List should be stable enough to use an iterator here.
2035     VirtualSpaceListIterator iter(virtual_space_list());
2036     while (iter.repeat()) {
2037       VirtualSpaceNode* vsl = iter.get_next();
2038       assert(vsl != purged_vsl, "Purge of vsl failed");
2039     }
2040   }
2041 #endif
2042 }
2043 
2044 
2045 // This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
2047 // unloading time during a safepoint.
2048 bool VirtualSpaceList::contains(const void* ptr) {
2049   // List should be stable enough to use an iterator here because removing virtual
2050   // space nodes is only allowed at a safepoint.
2051   VirtualSpaceListIterator iter(virtual_space_list());
2052   while (iter.repeat()) {
2053     VirtualSpaceNode* vsn = iter.get_next();
2054     if (vsn->contains(ptr)) {
2055       return true;
2056     }
2057   }
2058   return false;
2059 }
2060 
2061 void VirtualSpaceList::retire_current_virtual_space() {
2062   assert_lock_strong(SpaceManager::expand_lock());
2063 
2064   VirtualSpaceNode* vsn = current_virtual_space();
2065 
2066   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2067                                   Metaspace::chunk_manager_metadata();
2068 
2069   vsn->retire(cm);
2070 }
2071 
2072 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2073   DEBUG_ONLY(verify_container_count();)
2074   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2075   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2076     ChunkIndex index = (ChunkIndex)i;
2077     size_t chunk_size = chunk_manager->size_by_index(index);
2078 
2079     while (free_words_in_vs() >= chunk_size) {
2080       Metachunk* chunk = get_chunk_vs(chunk_size);
2081       // Chunk will be allocated aligned, so allocation may require
2082       // additional padding chunks. That may cause above allocation to
2083       // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
2085       // size should be a multiple of the smallest chunk size, we
2086       // should always be able to fill the VirtualSpace completely.
2087       if (chunk == NULL) {
2088         break;
2089       }
2090       chunk_manager->return_single_chunk(index, chunk);
2091     }
2092     DEBUG_ONLY(verify_container_count();)
2093   }
2094   assert(free_words_in_vs() == 0, "should be empty now");
2095 }
2096 
2097 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2098                                    _is_class(false),
2099                                    _virtual_space_list(NULL),
2100                                    _current_virtual_space(NULL),
2101                                    _reserved_words(0),
2102                                    _committed_words(0),
2103                                    _virtual_space_count(0) {
2104   MutexLockerEx cl(SpaceManager::expand_lock(),
2105                    Mutex::_no_safepoint_check_flag);
2106   create_new_virtual_space(word_size);
2107 }
2108 
2109 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2110                                    _is_class(true),
2111                                    _virtual_space_list(NULL),
2112                                    _current_virtual_space(NULL),
2113                                    _reserved_words(0),
2114                                    _committed_words(0),
2115                                    _virtual_space_count(0) {
2116   MutexLockerEx cl(SpaceManager::expand_lock(),
2117                    Mutex::_no_safepoint_check_flag);
2118   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2119   bool succeeded = class_entry->initialize();
2120   if (succeeded) {
2121     link_vs(class_entry);
2122   }
2123 }
2124 
2125 size_t VirtualSpaceList::free_bytes() {
2126   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2127 }
2128 
2129 // Allocate another meta virtual space and add it to the list.
2130 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2131   assert_lock_strong(SpaceManager::expand_lock());
2132 
2133   if (is_class()) {
2134     assert(false, "We currently don't support more than one VirtualSpace for"
2135                   " the compressed class space. The initialization of the"
2136                   " CCS uses another code path and should not hit this path.");
2137     return false;
2138   }
2139 
2140   if (vs_word_size == 0) {
2141     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2142     return false;
2143   }
2144 
2145   // Reserve the space
2146   size_t vs_byte_size = vs_word_size * BytesPerWord;
2147   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2148 
2149   // Allocate the meta virtual space and initialize it.
2150   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2151   if (!new_entry->initialize()) {
2152     delete new_entry;
2153     return false;
2154   } else {
2155     assert(new_entry->reserved_words() == vs_word_size,
2156         "Reserved memory size differs from requested memory size");
2157     // ensure lock-free iteration sees fully initialized node
2158     OrderAccess::storestore();
2159     link_vs(new_entry);
2160     return true;
2161   }
2162 }
2163 
2164 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2165   if (virtual_space_list() == NULL) {
2166       set_virtual_space_list(new_entry);
2167   } else {
2168     current_virtual_space()->set_next(new_entry);
2169   }
2170   set_current_virtual_space(new_entry);
2171   inc_reserved_words(new_entry->reserved_words());
2172   inc_committed_words(new_entry->committed_words());
2173   inc_virtual_space_count();
2174 #ifdef ASSERT
2175   new_entry->mangle();
2176 #endif
2177   LogTarget(Trace, gc, metaspace) lt;
2178   if (lt.is_enabled()) {
2179     LogStream ls(lt);
2180     VirtualSpaceNode* vsl = current_virtual_space();
2181     ResourceMark rm;
2182     vsl->print_on(&ls);
2183   }
2184 }
2185 
2186 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2187                                       size_t min_words,
2188                                       size_t preferred_words) {
2189   size_t before = node->committed_words();
2190 
2191   bool result = node->expand_by(min_words, preferred_words);
2192 
2193   size_t after = node->committed_words();
2194 
2195   // after and before can be the same if the memory was pre-committed.
2196   assert(after >= before, "Inconsistency");
2197   inc_committed_words(after - before);
2198 
2199   return result;
2200 }
2201 
2202 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2203   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2204   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2205   assert(min_words <= preferred_words, "Invalid arguments");
2206 
2207   const char* const class_or_not = (is_class() ? "class" : "non-class");
2208 
2209   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2210     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2211               class_or_not);
2212     return  false;
2213   }
2214 
2215   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2216   if (allowed_expansion_words < min_words) {
2217     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2218               class_or_not);
2219     return false;
2220   }
2221 
2222   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2223 
  // Commit more memory from the current virtual space.
2225   bool vs_expanded = expand_node_by(current_virtual_space(),
2226                                     min_words,
2227                                     max_expansion_words);
2228   if (vs_expanded) {
2229      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2230                class_or_not);
2231      return true;
2232   }
2233   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2234             class_or_not);
2235   retire_current_virtual_space();
2236 
2237   // Get another virtual space.
2238   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2239   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2240 
2241   if (create_new_virtual_space(grow_vs_words)) {
2242     if (current_virtual_space()->is_pre_committed()) {
2243       // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
2247       return true;
2248     }
2249 
2250     return expand_node_by(current_virtual_space(),
2251                           min_words,
2252                           max_expansion_words);
2253   }
2254 
2255   return false;
2256 }
2257 
// Given a chunk size, calculate the largest possible padding space which
// could be required when allocating a chunk of that size.
2260 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2261   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2262   if (chunk_type != HumongousIndex) {
2263     // Normal, non-humongous chunks are allocated at chunk size
2264     // boundaries, so the largest padding space required would be that
2265     // minus the smallest chunk size.
2266     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2267     return chunk_word_size - smallest_chunk_size;
2268   } else {
2269     // Humongous chunks are allocated at smallest-chunksize
2270     // boundaries, so there is no padding required.
2271     return 0;
2272   }
2273 }
2274 
2275 
2276 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2277 
2278   // Allocate a chunk out of the current virtual space.
2279   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2280 
2281   if (next != NULL) {
2282     return next;
2283   }
2284 
2285   // The expand amount is currently only determined by the requested sizes
2286   // and not how much committed memory is left in the current virtual space.
2287 
2288   // We must have enough space for the requested size and any
  // additional required padding chunks.
2290   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2291 
2292   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2293   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2294   if (min_word_size >= preferred_word_size) {
2295     // Can happen when humongous chunks are allocated.
2296     preferred_word_size = min_word_size;
2297   }
2298 
2299   bool expanded = expand_by(min_word_size, preferred_word_size);
2300   if (expanded) {
2301     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2302     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2303   }
2304 
  return next;
2306 }
2307 
2308 void VirtualSpaceList::print_on(outputStream* st) const {
2309   VirtualSpaceListIterator iter(virtual_space_list());
2310   while (iter.repeat()) {
2311     VirtualSpaceNode* node = iter.get_next();
2312     node->print_on(st);
2313   }
2314 }
2315 
2316 void VirtualSpaceList::print_map(outputStream* st) const {
2317   VirtualSpaceNode* list = virtual_space_list();
2318   VirtualSpaceListIterator iter(list);
2319   unsigned i = 0;
2320   while (iter.repeat()) {
2321     st->print_cr("Node %u:", i);
2322     VirtualSpaceNode* node = iter.get_next();
2323     node->print_map(st, this->is_class());
2324     i ++;
2325   }
2326 }
2327 
2328 // MetaspaceGC methods
2329 
// VM_CollectForMetadataAllocation is the VM operation used to trigger a GC.
2331 // Within the VM operation after the GC the attempt to allocate the metadata
2332 // should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtual space will be
2334 // allocated for the metadata.  With perm gen the increase in the perm
2335 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2336 // metaspace policy uses those as the small and large steps for the HWM.
2337 //
2338 // After the GC the compute_new_size() for MetaspaceGC is called to
2339 // resize the capacity of the metaspaces.  The current implementation
2340 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2341 // to resize the Java heap by some GC's.  New flags can be implemented
2342 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2343 // free space is desirable in the metaspace capacity to decide how much
2344 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2345 // free space is desirable in the metaspace capacity before decreasing
2346 // the HWM.
2347 
2348 // Calculate the amount to increase the high water mark (HWM).
2349 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2350 // another expansion is not requested too soon.  If that is not
2351 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2352 // If that is still not enough, expand by the size of the allocation
2353 // plus some.
2354 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2355   size_t min_delta = MinMetaspaceExpansion;
2356   size_t max_delta = MaxMetaspaceExpansion;
2357   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2358 
2359   if (delta <= min_delta) {
2360     delta = min_delta;
2361   } else if (delta <= max_delta) {
2362     // Don't want to hit the high water mark on the next
2363     // allocation so make the delta greater than just enough
2364     // for this allocation.
2365     delta = max_delta;
2366   } else {
2367     // This allocation is large but the next ones are probably not
2368     // so increase by the minimum.
2369     delta = delta + min_delta;
2370   }
2371 
2372   assert_is_aligned(delta, Metaspace::commit_alignment());
2373 
2374   return delta;
2375 }
2376 
2377 size_t MetaspaceGC::capacity_until_GC() {
2378   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2379   assert(value >= MetaspaceSize, "Not initialized properly?");
2380   return value;
2381 }
2382 
2383 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2384   assert_is_aligned(v, Metaspace::commit_alignment());
2385 
2386   intptr_t capacity_until_GC = _capacity_until_GC;
2387   intptr_t new_value = capacity_until_GC + v;
2388 
2389   if (new_value < capacity_until_GC) {
2390     // The addition wrapped around, set new_value to aligned max value.
2391     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2392   }
2393 
2394   intptr_t expected = _capacity_until_GC;
2395   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2396 
2397   if (expected != actual) {
2398     return false;
2399   }
2400 
2401   if (new_cap_until_GC != NULL) {
2402     *new_cap_until_GC = new_value;
2403   }
2404   if (old_cap_until_GC != NULL) {
2405     *old_cap_until_GC = capacity_until_GC;
2406   }
2407   return true;
2408 }
2409 
2410 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2411   assert_is_aligned(v, Metaspace::commit_alignment());
2412 
2413   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2414 }
2415 
2416 void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2418   // we can't do a GC during initialization.
2419   _capacity_until_GC = MaxMetaspaceSize;
2420 }
2421 
2422 void MetaspaceGC::post_initialize() {
2423   // Reset the high-water mark once the VM initialization is done.
2424   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2425 }
2426 
2427 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2428   // Check if the compressed class space is full.
2429   if (is_class && Metaspace::using_class_space()) {
2430     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2431     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2432       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2433                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2434       return false;
2435     }
2436   }
2437 
2438   // Check if the user has imposed a limit on the metaspace memory.
2439   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2440   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2441     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2442               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2443     return false;
2444   }
2445 
2446   return true;
2447 }
2448 
2449 size_t MetaspaceGC::allowed_expansion() {
2450   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2451   size_t capacity_until_gc = capacity_until_GC();
2452 
2453   assert(capacity_until_gc >= committed_bytes,
2454          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2455          capacity_until_gc, committed_bytes);
2456 
2457   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2458   size_t left_until_GC = capacity_until_gc - committed_bytes;
2459   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2463 
2464   return left_to_commit / BytesPerWord;
2465 }
2466 
2467 void MetaspaceGC::compute_new_size() {
2468   assert(_shrink_factor <= 100, "invalid shrink factor");
2469   uint current_shrink_factor = _shrink_factor;
2470   _shrink_factor = 0;
2471 
2472   // Using committed_bytes() for used_after_gc is an overestimation, since the
2473   // chunk free lists are included in committed_bytes() and the memory in an
2474   // un-fragmented chunk free list is available for future allocations.
2475   // However, if the chunk free lists becomes fragmented, then the memory may
2476   // not be available for future allocations and the memory is therefore "in use".
2477   // Including the chunk free lists in the definition of "in use" is therefore
2478   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2479   // shrink below committed_bytes() and this has caused serious bugs in the past.
2480   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2481   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2482 
2483   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2484   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2485 
2486   const double min_tmp = used_after_gc / maximum_used_percentage;
2487   size_t minimum_desired_capacity =
2488     (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial size (MetaspaceSize).
2490   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2491                                   MetaspaceSize);
2492 
2493   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2494   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2495                            minimum_free_percentage, maximum_used_percentage);
2496   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2497 
2498 
2499   size_t shrink_bytes = 0;
2500   if (capacity_until_GC < minimum_desired_capacity) {
2501     // If we have less capacity below the metaspace HWM, then
2502     // increment the HWM.
2503     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2504     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2505     // Don't expand unless it's significant
2506     if (expand_bytes >= MinMetaspaceExpansion) {
2507       size_t new_capacity_until_GC = 0;
2508       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");
2510 
2511       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2512                                                new_capacity_until_GC,
2513                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2514       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2515                                minimum_desired_capacity / (double) K,
2516                                expand_bytes / (double) K,
2517                                MinMetaspaceExpansion / (double) K,
2518                                new_capacity_until_GC / (double) K);
2519     }
2520     return;
2521   }
2522 
2523   // No expansion, now see if we want to shrink
2524   // We would never want to shrink more than this
2525   assert(capacity_until_GC >= minimum_desired_capacity,
2526          SIZE_FORMAT " >= " SIZE_FORMAT,
2527          capacity_until_GC, minimum_desired_capacity);
2528   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2529 
2530   // Should shrinking be considered?
2531   if (MaxMetaspaceFreeRatio < 100) {
2532     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2533     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2534     const double max_tmp = used_after_gc / minimum_used_percentage;
2535     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2536     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2537                                     MetaspaceSize);
2538     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2539                              maximum_free_percentage, minimum_used_percentage);
2540     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2541                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2542 
2543     assert(minimum_desired_capacity <= maximum_desired_capacity,
2544            "sanity check");
2545 
2546     if (capacity_until_GC > maximum_desired_capacity) {
2547       // Capacity too large, compute shrinking size
2548       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace again for the next phase.  So we
2552       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2553       // on the third call, and 100% by the fourth call.  But if we recompute
2554       // size without shrinking, it goes back to 0%.
2555       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2556 
2557       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2558 
2559       assert(shrink_bytes <= max_shrink_bytes,
2560              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2561              shrink_bytes, max_shrink_bytes);
2562       if (current_shrink_factor == 0) {
2563         _shrink_factor = 10;
2564       } else {
2565         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2566       }
2567       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2568                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2569       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2570                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2571     }
2572   }
2573 
2574   // Don't shrink unless it's significant
2575   if (shrink_bytes >= MinMetaspaceExpansion &&
2576       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2577     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2578     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2579                                              new_capacity_until_GC,
2580                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2581   }
2582 }
2583 
2584 // Metadebug methods
2585 
2586 void Metadebug::init_allocation_fail_alot_count() {
2587   if (MetadataAllocationFailALot) {
2588     _allocation_fail_alot_count =
2589       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2590   }
2591 }
2592 
2593 #ifdef ASSERT
2594 bool Metadebug::test_metadata_failure() {
2595   if (MetadataAllocationFailALot &&
2596       Threads::is_vm_complete()) {
2597     if (_allocation_fail_alot_count > 0) {
2598       _allocation_fail_alot_count--;
2599     } else {
2600       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2601       init_allocation_fail_alot_count();
2602       return true;
2603     }
2604   }
2605   return false;
2606 }
2607 #endif
2608 
2609 // ChunkManager methods
2610 size_t ChunkManager::free_chunks_total_words() {
2611   return _free_chunks_total;
2612 }
2613 
2614 size_t ChunkManager::free_chunks_total_bytes() {
2615   return free_chunks_total_words() * BytesPerWord;
2616 }
2617 
2618 // Update internal accounting after a chunk was added
2619 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2620   assert_lock_strong(SpaceManager::expand_lock());
2621   _free_chunks_count ++;
2622   _free_chunks_total += c->word_size();
2623 }
2624 
2625 // Update internal accounting after a chunk was removed
2626 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2627   assert_lock_strong(SpaceManager::expand_lock());
2628   assert(_free_chunks_count >= 1,
2629     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2630   assert(_free_chunks_total >= c->word_size(),
2631     "ChunkManager::_free_chunks_total: about to go negative"
2632      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2633   _free_chunks_count --;
2634   _free_chunks_total -= c->word_size();
2635 }
2636 
2637 size_t ChunkManager::free_chunks_count() {
2638 #ifdef ASSERT
2639   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
2640     MutexLockerEx cl(SpaceManager::expand_lock(),
2641                      Mutex::_no_safepoint_check_flag);
2642     // This lock is only needed in debug because the verification
2643     // of _free_chunks_count walks the list of free chunks
2644     slow_locked_verify_free_chunks_count();
2645   }
2646 #endif
2647   return _free_chunks_count;
2648 }
2649 
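     // Descriptive note: list_index() maps an exact chunk word size to its freelist
     // index. The per-index sizes come from size_by_index() and differ between the
     // class and non-class ChunkManager; any size above the medium chunk size is
     // classified as humongous.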
2650 ChunkIndex ChunkManager::list_index(size_t size) {
2651   if (size_by_index(SpecializedIndex) == size) {
2652     return SpecializedIndex;
2653   }
2654   if (size_by_index(SmallIndex) == size) {
2655     return SmallIndex;
2656   }
2657   const size_t med_size = size_by_index(MediumIndex);
2658   if (med_size == size) {
2659     return MediumIndex;
2660   }
2661 
2662   assert(size > med_size, "Not a humongous chunk");
2663   return HumongousIndex;
2664 }
2665 
2666 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2667   index_bounds_check(index);
2668   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2669   return _free_chunks[index].size();
2670 }
2671 
2672 void ChunkManager::locked_verify_free_chunks_total() {
2673   assert_lock_strong(SpaceManager::expand_lock());
2674   assert(sum_free_chunks() == _free_chunks_total,
2675          "_free_chunks_total " SIZE_FORMAT " is not the"
2676          " same as sum " SIZE_FORMAT, _free_chunks_total,
2677          sum_free_chunks());
2678 }
2679 
2680 void ChunkManager::verify_free_chunks_total() {
2681   MutexLockerEx cl(SpaceManager::expand_lock(),
2682                      Mutex::_no_safepoint_check_flag);
2683   locked_verify_free_chunks_total();
2684 }
2685 
2686 void ChunkManager::locked_verify_free_chunks_count() {
2687   assert_lock_strong(SpaceManager::expand_lock());
2688   assert(sum_free_chunks_count() == _free_chunks_count,
2689          "_free_chunks_count " SIZE_FORMAT " is not the"
2690          " same as sum " SIZE_FORMAT, _free_chunks_count,
2691          sum_free_chunks_count());
2692 }
2693 
2694 void ChunkManager::verify_free_chunks_count() {
2695 #ifdef ASSERT
2696   MutexLockerEx cl(SpaceManager::expand_lock(),
2697                      Mutex::_no_safepoint_check_flag);
2698   locked_verify_free_chunks_count();
2699 #endif
2700 }
2701 
2702 void ChunkManager::verify() {
2703   MutexLockerEx cl(SpaceManager::expand_lock(),
2704                      Mutex::_no_safepoint_check_flag);
2705   locked_verify();
2706 }
2707 
2708 void ChunkManager::locked_verify() {
2709   locked_verify_free_chunks_count();
2710   locked_verify_free_chunks_total();
2711   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2712     ChunkList* list = free_chunks(i);
2713     if (list != NULL) {
2714       Metachunk* chunk = list->head();
2715       while (chunk) {
2716         DEBUG_ONLY(do_verify_chunk(chunk);)
2717         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2718         chunk = chunk->next();
2719       }
2720     }
2721   }
2722 }
2723 
2724 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2725   assert_lock_strong(SpaceManager::expand_lock());
2726   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2727                 _free_chunks_total, _free_chunks_count);
2728 }
2729 
2730 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2731   assert_lock_strong(SpaceManager::expand_lock());
2732   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2733                 sum_free_chunks(), sum_free_chunks_count());
2734 }
2735 
2736 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2737   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2738          "Bad index: %d", (int)index);
2739 
2740   return &_free_chunks[index];
2741 }
2742 
2743 // These methods, which sum over the free chunk lists, are used by printing
2744 // methods that are also used in product builds.
2745 size_t ChunkManager::sum_free_chunks() {
2746   assert_lock_strong(SpaceManager::expand_lock());
2747   size_t result = 0;
2748   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2749     ChunkList* list = free_chunks(i);
2750 
2751     if (list == NULL) {
2752       continue;
2753     }
2754 
2755     result = result + list->count() * list->size();
2756   }
2757   result = result + humongous_dictionary()->total_size();
2758   return result;
2759 }
2760 
2761 size_t ChunkManager::sum_free_chunks_count() {
2762   assert_lock_strong(SpaceManager::expand_lock());
2763   size_t count = 0;
2764   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2765     ChunkList* list = free_chunks(i);
2766     if (list == NULL) {
2767       continue;
2768     }
2769     count = count + list->count();
2770   }
2771   count = count + humongous_dictionary()->total_free_blocks();
2772   return count;
2773 }
2774 
2775 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2776   ChunkIndex index = list_index(word_size);
2777   assert(index < HumongousIndex, "No humongous list");
2778   return free_chunks(index);
2779 }
2780 
2781 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2782 // split the larger chunk into n smaller chunks, at least one of which has the
2783 // target chunk size. The smaller chunks, including the target chunk, are returned
2784 // to the freelist. A pointer to the target chunk is returned; the caller is
2785 // expected to remove it from the freelist right away.
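     // Illustrative example (assuming the non-class chunk sizes): splitting a medium
     // chunk to obtain a specialized chunk places the target specialized chunk at the
     // region start, then fills the remainder with the largest chunk that satisfies
     // the alignment at each position - three more specialized chunks up to the first
     // small-chunk boundary, then small chunks to the end of the region.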
2786 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2787   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2788 
2789   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2790   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2791 
2792   MetaWord* const region_start = (MetaWord*)larger_chunk;
2793   const size_t region_word_len = larger_chunk->word_size();
2794   MetaWord* const region_end = region_start + region_word_len;
2795   VirtualSpaceNode* const vsn = larger_chunk->container();
2796   OccupancyMap* const ocmap = vsn->occupancy_map();
2797 
2798   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2799   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2800   // at an address suitable to place the smaller target chunk.
2801   assert_is_aligned(region_start, target_chunk_word_size);
2802 
2803   // Remove old chunk.
2804   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2805   larger_chunk->remove_sentinel();
2806 
2807   // Prevent access to the old chunk from here on.
2808   larger_chunk = NULL;
2809   // ... and wipe it.
2810   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2811 
2812   // In its place create first the target chunk...
2813   MetaWord* p = region_start;
2814   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2815   assert(target_chunk == (Metachunk*)p, "Sanity");
2816   target_chunk->set_origin(origin_split);
2817 
2818   // Note: we do not need to mark its start in the occupancy map
2819   // because it coincides with the old chunk start.
2820 
2821   // Mark chunk as free and return to the freelist.
2822   do_update_in_use_info_for_chunk(target_chunk, false);
2823   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2824 
2825   // This chunk should now be valid and can be verified.
2826   DEBUG_ONLY(do_verify_chunk(target_chunk));
2827 
2828   // In the remaining space create the remainder chunks.
2829   p += target_chunk->word_size();
2830   assert(p < region_end, "Sanity");
2831 
2832   while (p < region_end) {
2833 
2834     // Find the largest chunk size which fits the alignment requirements at address p.
2835     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2836     size_t this_chunk_word_size = 0;
2837     for(;;) {
2838       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2839       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2840         break;
2841       } else {
2842         this_chunk_index = prev_chunk_index(this_chunk_index);
2843         assert(this_chunk_index >= target_chunk_index, "Sanity");
2844       }
2845     }
2846 
2847     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2848     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2849     assert(p + this_chunk_word_size <= region_end, "Sanity");
2850 
2851     // Create splitting chunk.
2852     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2853     assert(this_chunk == (Metachunk*)p, "Sanity");
2854     this_chunk->set_origin(origin_split);
2855     ocmap->set_chunk_starts_at_address(p, true);
2856     do_update_in_use_info_for_chunk(this_chunk, false);
2857 
2858     // This chunk should be valid and can be verified.
2859     DEBUG_ONLY(do_verify_chunk(this_chunk));
2860 
2861     // Return this chunk to freelist and correct counter.
2862     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2863     _free_chunks_count ++;
2864 
2865     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2866       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2867       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2868       p2i(region_start), p2i(region_end));
2869 
2870     p += this_chunk_word_size;
2871 
2872   }
2873 
2874   return target_chunk;
2875 }
2876 
2877 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2878   assert_lock_strong(SpaceManager::expand_lock());
2879 
2880   slow_locked_verify();
2881 
2882   Metachunk* chunk = NULL;
2883   bool we_did_split_a_chunk = false;
2884 
2885   if (list_index(word_size) != HumongousIndex) {
2886 
2887     ChunkList* free_list = find_free_chunks_list(word_size);
2888     assert(free_list != NULL, "Sanity check");
2889 
2890     chunk = free_list->head();
2891 
2892     if (chunk == NULL) {
2893       // There is no free chunk of the requested size: split a larger free chunk
2894       // into smaller chunks. This is the counterpart of coalescing upon chunk return.
2895 
2896       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2897 
2898       // Is there a larger chunk we could split?
2899       Metachunk* larger_chunk = NULL;
2900       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2901       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2902         larger_chunk = free_chunks(larger_chunk_index)->head();
2903         if (larger_chunk == NULL) {
2904           larger_chunk_index = next_chunk_index(larger_chunk_index);
2905         }
2906       }
2907 
2908       if (larger_chunk != NULL) {
2909         assert(larger_chunk->word_size() > word_size, "Sanity");
2910         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2911 
2912         // We found a larger chunk. Let's split it up:
2913         // - remove old chunk
2914         // - in its place, create new smaller chunks, with at least one chunk
2915         //   being of target size, the others sized as large as possible. This
2916         //   is to make sure the resulting chunks are "as coalesced as possible"
2917         //   (similar to VirtualSpaceNode::retire()).
2918         // Note: during this operation both ChunkManager and VirtualSpaceNode
2919         //  are temporarily invalid, so be careful with asserts.
2920 
2921         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2922            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2923           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2924           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2925 
2926         chunk = split_chunk(word_size, larger_chunk);
2927 
2928         // This should have worked.
2929         assert(chunk != NULL, "Sanity");
2930         assert(chunk->word_size() == word_size, "Sanity");
2931         assert(chunk->is_tagged_free(), "Sanity");
2932 
2933         we_did_split_a_chunk = true;
2934 
2935       }
2936     }
2937 
2938     if (chunk == NULL) {
2939       return NULL;
2940     }
2941 
2942     // Remove the chunk as the head of the list.
2943     free_list->remove_chunk(chunk);
2944 
2945     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2946                                        p2i(free_list), free_list->count());
2947 
2948   } else {
2949     chunk = humongous_dictionary()->get_chunk(word_size);
2950 
2951     if (chunk == NULL) {
2952       return NULL;
2953     }
2954 
2955     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2956                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2957   }
2958 
2959   // Chunk has been removed from the chunk manager; update counters.
2960   account_for_removed_chunk(chunk);
2961   do_update_in_use_info_for_chunk(chunk, true);
2962   chunk->container()->inc_container_count();
2963   chunk->inc_use_count();
2964 
2965   // Clear the chunk's freelist links.
2966   chunk->set_next(NULL);
2967   chunk->set_prev(NULL);
2968 
2969   // Run some verifications (some more if we did a chunk split)
2970 #ifdef ASSERT
2971   if (VerifyMetaspace) {
2972     locked_verify();
2973     VirtualSpaceNode* const vsn = chunk->container();
2974     vsn->verify();
2975     if (we_did_split_a_chunk) {
2976       vsn->verify_free_chunks_are_ideally_merged();
2977     }
2978   }
2979 #endif
2980 
2981   return chunk;
2982 }
2983 
2984 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2985   assert_lock_strong(SpaceManager::expand_lock());
2986   slow_locked_verify();
2987 
2988   // Take from the beginning of the list
2989   Metachunk* chunk = free_chunks_get(word_size);
2990   if (chunk == NULL) {
2991     return NULL;
2992   }
2993 
2994   assert((word_size <= chunk->word_size()) ||
2995          (list_index(chunk->word_size()) == HumongousIndex),
2996          "Non-humongous variable sized chunk");
2997   LogTarget(Debug, gc, metaspace, freelist) lt;
2998   if (lt.is_enabled()) {
2999     size_t list_count;
3000     if (list_index(word_size) < HumongousIndex) {
3001       ChunkList* list = find_free_chunks_list(word_size);
3002       list_count = list->count();
3003     } else {
3004       list_count = humongous_dictionary()->total_count();
3005     }
3006     LogStream ls(lt);
3007     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3008              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3009     ResourceMark rm;
3010     locked_print_free_chunks(&ls);
3011   }
3012 
3013   return chunk;
3014 }
3015 
3016 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3017   assert_lock_strong(SpaceManager::expand_lock());
3018   assert(chunk != NULL, "Expected chunk.");
3019   DEBUG_ONLY(do_verify_chunk(chunk);)
3020   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3021   assert(chunk->container() != NULL, "Container should have been set.");
3022   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3023   index_bounds_check(index);
3024 
3025   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3026   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3027   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3028   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3029 
3030   if (index != HumongousIndex) {
3031     // Return non-humongous chunk to freelist.
3032     ChunkList* list = free_chunks(index);
3033     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3034     list->return_chunk_at_head(chunk);
3035     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3036         chunk_size_name(index), p2i(chunk));
3037   } else {
3038     // Return humongous chunk to dictionary.
3039     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3040     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3041            "Humongous chunk has wrong alignment.");
3042     _humongous_dictionary.return_chunk(chunk);
3043     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3044         chunk_size_name(index), p2i(chunk), chunk->word_size());
3045   }
3046   chunk->container()->dec_container_count();
3047   do_update_in_use_info_for_chunk(chunk, false);
3048 
3049   // Chunk has been added; update counters.
3050   account_for_added_chunk(chunk);
3051 
3052   // Attempt to coalesce the returned chunk with its neighboring chunks:
3053   // if this chunk is small or specialized, attempt to coalesce to a medium chunk.
3054   if (index == SmallIndex || index == SpecializedIndex) {
3055     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3056       // That did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3057       if (index == SpecializedIndex) {
3058         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3059           // give up.
3060         }
3061       }
3062     }
3063   }
3064 
3065 }
3066 
3067 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3068   index_bounds_check(index);
3069   if (chunks == NULL) {
3070     return;
3071   }
3072   LogTarget(Trace, gc, metaspace, freelist) log;
3073   if (log.is_enabled()) { // tracing
3074     log.print("returning list of %s chunks...", chunk_size_name(index));
3075   }
3076   unsigned num_chunks_returned = 0;
3077   size_t size_chunks_returned = 0;
3078   Metachunk* cur = chunks;
3079   while (cur != NULL) {
3080     // Capture the next link before it is changed
3081     // by the call to return_chunk_at_head();
3082     Metachunk* next = cur->next();
3083     if (log.is_enabled()) { // tracing
3084       num_chunks_returned ++;
3085       size_chunks_returned += cur->word_size();
3086     }
3087     return_single_chunk(index, cur);
3088     cur = next;
3089   }
3090   if (log.is_enabled()) { // tracing
3091     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3092         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3093     if (index != HumongousIndex) {
3094       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3095     } else {
3096       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3097     }
3098   }
3099 }
3100 
3101 void ChunkManager::print_on(outputStream* out) const {
3102   _humongous_dictionary.report_statistics(out);
3103 }
3104 
3105 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3106   assert_lock_strong(SpaceManager::expand_lock());
3107   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3108     stat->num_by_type[i] = num_free_chunks(i);
3109     stat->single_size_by_type[i] = size_by_index(i);
3110     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3111   }
3112   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3113   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3114 }
3115 
3116 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3117   MutexLockerEx cl(SpaceManager::expand_lock(),
3118                    Mutex::_no_safepoint_check_flag);
3119   locked_get_statistics(stat);
3120 }
3121 
3122 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3123   size_t total = 0;
3124   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3125 
3126   const char* unit = scale_unit(scale);
3127   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3128     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3129                    stat->num_by_type[i], chunk_size_name(i),
3130                    stat->single_size_by_type[i]);
3131     if (scale == 1) {
3132       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3133     } else {
3134       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3135     }
3136 
3137     total += stat->total_size_by_type[i];
3138   }
3139 
3140 
3141   total += stat->total_size_humongous_chunks;
3142 
3143   if (scale == 1) {
3144     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3145     stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3146 
3147     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3148   } else {
3149     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3150     stat->num_humongous_chunks,
3151     (float)stat->total_size_humongous_chunks / scale, unit);
3152 
3153     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3154   }
3155 
3156 }
3157 
3158 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3159   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3160 
3161   // Note: keep lock protection only while retrieving the statistics; do the
3162   // printing outside of lock protection.
3163   ChunkManagerStatistics stat;
3164   out->print_cr("Chunkmanager (non-class):");
3165   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3166   if (non_class_cm != NULL) {
3167     non_class_cm->get_statistics(&stat);
3168     ChunkManager::print_statistics(&stat, out, scale);
3169   } else {
3170     out->print_cr("unavailable.");
3171   }
3172   out->print_cr("Chunkmanager (class):");
3173   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3174   if (class_cm != NULL) {
3175     class_cm->get_statistics(&stat);
3176     ChunkManager::print_statistics(&stat, out, scale);
3177   } else {
3178     out->print_cr("unavailable.");
3179   }
3180 }
3181 
3182 // SpaceManager methods
3183 
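     // Descriptive note: rounds a requested initial chunk size up to the next fixed
     // chunk size (specialized, small, medium); larger requests are returned unchanged
     // and treated as humongous. Illustrative values (non-class space): a request of
     // 300 words is adjusted to SmallChunk (512), while 9000 words exceeds MediumChunk
     // and stays 9000.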
3184 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3185   size_t chunk_sizes[] = {
3186       specialized_chunk_size(is_class_space),
3187       small_chunk_size(is_class_space),
3188       medium_chunk_size(is_class_space)
3189   };
3190 
3191   // Adjust up to one of the fixed chunk sizes ...
3192   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3193     if (requested <= chunk_sizes[i]) {
3194       return chunk_sizes[i];
3195     }
3196   }
3197 
3198   // ... or return the size as a humongous chunk.
3199   return requested;
3200 }
3201 
3202 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3203   return adjust_initial_chunk_size(requested, is_class());
3204 }
3205 
3206 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3207   size_t requested;
3208 
3209   if (is_class()) {
3210     switch (type) {
3211     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3212     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3213     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3214     default:                                 requested = ClassSmallChunk; break;
3215     }
3216   } else {
3217     switch (type) {
3218     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3219     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3220     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3221     default:                                 requested = SmallChunk; break;
3222     }
3223   }
3224 
3225   // Adjust to one of the fixed chunk sizes (unless humongous)
3226   const size_t adjusted = adjust_initial_chunk_size(requested);
3227 
3228   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3229          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3230 
3231   return adjusted;
3232 }
3233 
3234 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3235   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3236   size_t free = 0;
3237   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3238     Metachunk* chunk = chunks_in_use(i);
3239     while (chunk != NULL) {
3240       free += chunk->free_word_size();
3241       chunk = chunk->next();
3242     }
3243   }
3244   return free;
3245 }
3246 
3247 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3248   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3249   size_t result = 0;
3250   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3251     result += sum_waste_in_chunks_in_use(i);
3252   }
3253 
3254   return result;
3255 }
3256 
3257 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3258   size_t result = 0;
3259   Metachunk* chunk = chunks_in_use(index);
3260   // Count the free space in all the chunks but not in the
3261   // current chunk, from which allocations are still being done.
3262   while (chunk != NULL) {
3263     if (chunk != current_chunk()) {
3264       result += chunk->free_word_size();
3265     }
3266     chunk = chunk->next();
3267   }
3268   return result;
3269 }
3270 
3271 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3272   // For CMS use allocated_chunks_words(), which does not need the
3273   // Metaspace lock.  For the other collectors, sum over the
3274   // chunks-in-use lists.  sum_capacity_in_chunks_in_use() is too
3275   // expensive to use in product builds, so allocated_chunks_words()
3276   // should be used there; keeping both allows asserting (see
3277   // ~SpaceManager) that allocated_chunks_words() returns the same
3278   // value as sum_capacity_in_chunks_in_use(), which is the
3279   // definitive answer.
3280   if (UseConcMarkSweepGC) {
3281     return allocated_chunks_words();
3282   } else {
3283     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3284     size_t sum = 0;
3285     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3286       Metachunk* chunk = chunks_in_use(i);
3287       while (chunk != NULL) {
3288         sum += chunk->word_size();
3289         chunk = chunk->next();
3290       }
3291     }
3292     return sum;
3293   }
3294 }
3295 
3296 size_t SpaceManager::sum_count_in_chunks_in_use() {
3297   size_t count = 0;
3298   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3299     count = count + sum_count_in_chunks_in_use(i);
3300   }
3301 
3302   return count;
3303 }
3304 
3305 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3306   size_t count = 0;
3307   Metachunk* chunk = chunks_in_use(i);
3308   while (chunk != NULL) {
3309     count++;
3310     chunk = chunk->next();
3311   }
3312   return count;
3313 }
3314 
3315 
3316 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3317   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3318   size_t used = 0;
3319   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3320     Metachunk* chunk = chunks_in_use(i);
3321     while (chunk != NULL) {
3322       used += chunk->used_word_size();
3323       chunk = chunk->next();
3324     }
3325   }
3326   return used;
3327 }
3328 
3329 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3330 
3331   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3332     Metachunk* chunk = chunks_in_use(i);
3333     st->print("SpaceManager: %s " PTR_FORMAT,
3334                  chunk_size_name(i), p2i(chunk));
3335     if (chunk != NULL) {
3336       st->print_cr(" free " SIZE_FORMAT,
3337                    chunk->free_word_size());
3338     } else {
3339       st->cr();
3340     }
3341   }
3342 
3343   chunk_manager()->locked_print_free_chunks(st);
3344   chunk_manager()->locked_print_sum_free_chunks(st);
3345 }
3346 
3347 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3348 
3349   // Decide between a small chunk and a medium chunk.  Up to
3350   // _small_chunk_limit small chunks can be allocated.
3351   // After that a medium chunk is preferred.
3352   size_t chunk_word_size;
3353 
3354   // Special case for anonymous metadata space.
3355   // Anonymous metadata space is usually small, with the majority within the 1K - 2K
3356   // range and rarely around 4K (64-bit JVM).
3357   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
3358   // allocations in SpecializedChunks up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3359   // reduces space waste from 60+% to around 30%.
3360   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3361       _mdtype == Metaspace::NonClassType &&
3362       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3363       word_size + Metachunk::overhead() <= SpecializedChunk) {
3364     return SpecializedChunk;
3365   }
3366 
3367   if (chunks_in_use(MediumIndex) == NULL &&
3368       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3369     chunk_word_size = (size_t) small_chunk_size();
3370     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3371       chunk_word_size = medium_chunk_size();
3372     }
3373   } else {
3374     chunk_word_size = medium_chunk_size();
3375   }
3376 
3377   // Might still need a humongous chunk.  Enforce
3378   // humongous allocation sizes to be aligned up to
3379   // the smallest chunk size.
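       // Illustrative note (not from the original comments): for a humongous request
       // the result below is word_size + Metachunk::overhead(), rounded up to the next
       // multiple of smallest_chunk_size(); for non-humongous requests the small or
       // medium size chosen above is kept, since MAX2 picks the larger of the two.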
3380   size_t if_humongous_sized_chunk =
3381     align_up(word_size + Metachunk::overhead(),
3382                   smallest_chunk_size());
3383   chunk_word_size =
3384     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3385 
3386   assert(!SpaceManager::is_humongous(word_size) ||
3387          chunk_word_size == if_humongous_sized_chunk,
3388          "Size calculation is wrong, word_size " SIZE_FORMAT
3389          " chunk_word_size " SIZE_FORMAT,
3390          word_size, chunk_word_size);
3391   Log(gc, metaspace, alloc) log;
3392   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3393     log.debug("Metadata humongous allocation:");
3394     log.debug("  word_size " PTR_FORMAT, word_size);
3395     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3396     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3397   }
3398   return chunk_word_size;
3399 }
3400 
3401 void SpaceManager::track_metaspace_memory_usage() {
3402   if (is_init_completed()) {
3403     if (is_class()) {
3404       MemoryService::track_compressed_class_memory_usage();
3405     }
3406     MemoryService::track_metaspace_memory_usage();
3407   }
3408 }
3409 
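     // Descriptive note: called when the current chunk cannot satisfy the request.
     // Picks a new chunk size via calc_chunk_size(), obtains a chunk via get_new_chunk()
     // (chunk manager freelist first, then the virtual space list), adds it to the
     // in-use lists with add_chunk(), and retries the allocation from the new chunk.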
3410 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3411   assert(vs_list()->current_virtual_space() != NULL,
3412          "Should have been set");
3413   assert(current_chunk() == NULL ||
3414          current_chunk()->allocate(word_size) == NULL,
3415          "Don't need to expand");
3416   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3417 
3418   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3419     size_t words_left = 0;
3420     size_t words_used = 0;
3421     if (current_chunk() != NULL) {
3422       words_left = current_chunk()->free_word_size();
3423       words_used = current_chunk()->used_word_size();
3424     }
3425     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3426                                        word_size, words_used, words_left);
3427   }
3428 
3429   // Get another chunk
3430   size_t chunk_word_size = calc_chunk_size(word_size);
3431   Metachunk* next = get_new_chunk(chunk_word_size);
3432 
3433   MetaWord* mem = NULL;
3434 
3435   // If a chunk was available, add it to the in-use chunk list
3436   // and do an allocation from it.
3437   if (next != NULL) {
3438     // Add to this manager's list of chunks in use.
3439     add_chunk(next, false);
3440     mem = next->allocate(word_size);
3441   }
3442 
3443   // Track metaspace memory usage statistic.
3444   track_metaspace_memory_usage();
3445 
3446   return mem;
3447 }
3448 
3449 void SpaceManager::print_on(outputStream* st) const {
3450 
3451   for (ChunkIndex i = ZeroIndex;
3452        i < NumberOfInUseLists ;
3453        i = next_chunk_index(i) ) {
3454     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3455                  p2i(chunks_in_use(i)),
3456                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3457   }
3458   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3459                " Humongous " SIZE_FORMAT,
3460                sum_waste_in_chunks_in_use(SmallIndex),
3461                sum_waste_in_chunks_in_use(MediumIndex),
3462                sum_waste_in_chunks_in_use(HumongousIndex));
3463   // block free lists
3464   if (block_freelists() != NULL) {
3465     st->print_cr("total in block free lists " SIZE_FORMAT,
3466       block_freelists()->total_size());
3467   }
3468 }
3469 
3470 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3471                            Metaspace::MetaspaceType space_type,
3472                            Mutex* lock) :
3473   _mdtype(mdtype),
3474   _space_type(space_type),
3475   _allocated_blocks_words(0),
3476   _allocated_chunks_words(0),
3477   _allocated_chunks_count(0),
3478   _block_freelists(NULL),
3479   _lock(lock)
3480 {
3481   initialize();
3482 }
3483 
3484 void SpaceManager::inc_size_metrics(size_t words) {
3485   assert_lock_strong(SpaceManager::expand_lock());
3486   // Per-SpaceManager totals: words in allocated Metachunks and the
3487   // number of allocated Metachunks.
3488   _allocated_chunks_words = _allocated_chunks_words + words;
3489   _allocated_chunks_count++;
3490   // Global total of capacity in allocated Metachunks
3491   MetaspaceUtils::inc_capacity(mdtype(), words);
3492   // Global total of allocated Metablocks.
3493   // used_words_slow() includes the overhead in each
3494   // Metachunk so include it in the used when the
3495   // Metachunk is first added (so only added once per
3496   // Metachunk).
3497   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3498 }
3499 
3500 void SpaceManager::inc_used_metrics(size_t words) {
3501   // Add to the per SpaceManager total
3502   Atomic::add(words, &_allocated_blocks_words);
3503   // Add to the global total
3504   MetaspaceUtils::inc_used(mdtype(), words);
3505 }
3506 
3507 void SpaceManager::dec_total_from_size_metrics() {
3508   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3509   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3510   // Also deduct the overhead per Metachunk
3511   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3512 }
3513 
3514 void SpaceManager::initialize() {
3515   Metadebug::init_allocation_fail_alot_count();
3516   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3517     _chunks_in_use[i] = NULL;
3518   }
3519   _current_chunk = NULL;
3520   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3521 }
3522 
3523 SpaceManager::~SpaceManager() {
3524   // The assert below takes this->_lock (via sum_capacity_in_chunks_in_use()), which can't be done while holding expand_lock().
3525   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3526          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3527          " allocated_chunks_words() " SIZE_FORMAT,
3528          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3529 
3530   MutexLockerEx fcl(SpaceManager::expand_lock(),
3531                     Mutex::_no_safepoint_check_flag);
3532 
3533   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3534          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3535          " allocated_chunks_count() " SIZE_FORMAT,
3536          sum_count_in_chunks_in_use(), allocated_chunks_count());
3537 
3538   chunk_manager()->slow_locked_verify();
3539 
3540   dec_total_from_size_metrics();
3541 
3542   Log(gc, metaspace, freelist) log;
3543   if (log.is_trace()) {
3544     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3545     ResourceMark rm;
3546     LogStream ls(log.trace());
3547     locked_print_chunks_in_use_on(&ls);
3548     if (block_freelists() != NULL) {
3549       block_freelists()->print_on(&ls);
3550     }
3551   }
3552 
3553   // Add all the chunks in use by this space manager
3554   // to the global list of free chunks.
3555 
3556   // Follow each list of chunks-in-use and add them to the
3557   // free lists.  Each list is NULL terminated.
3558 
3559   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3560     Metachunk* chunks = chunks_in_use(i);
3561     chunk_manager()->return_chunk_list(i, chunks);
3562     set_chunks_in_use(i, NULL);
3563   }
3564 
3565   chunk_manager()->slow_locked_verify();
3566 
3567   if (_block_freelists != NULL) {
3568     delete _block_freelists;
3569   }
3570 }
3571 
3572 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3573   assert_lock_strong(_lock);
3574   // Allocations and deallocations are in raw_word_size
3575   size_t raw_word_size = get_allocation_word_size(word_size);
3576   // Lazily create a block_freelist
3577   if (block_freelists() == NULL) {
3578     _block_freelists = new BlockFreelist();
3579   }
3580   block_freelists()->return_block(p, raw_word_size);
3581 }
3582 
3583 // Adds a chunk to the list of chunks in use.
3584 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3585 
3586   assert(new_chunk != NULL, "Should not be NULL");
3587   assert(new_chunk->next() == NULL, "Should not be on a list");
3588 
3589   new_chunk->reset_empty();
3590 
3591   // Find the correct list and set the current
3592   // chunk for that list.
3593   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3594 
3595   if (index != HumongousIndex) {
3596     retire_current_chunk();
3597     set_current_chunk(new_chunk);
3598     new_chunk->set_next(chunks_in_use(index));
3599     set_chunks_in_use(index, new_chunk);
3600   } else {
3601     // For the null class loader data and for DumpSharedSpaces, the first chunk isn't
3602     // small, so the small chunk list will be null.  Link this first chunk as the
3603     // current chunk.
3604     if (make_current) {
3605       // Set as the current chunk but otherwise treat as a humongous chunk.
3606       set_current_chunk(new_chunk);
3607     }
3608     // Link at head.  The _current_chunk only points to a humongous chunk for
3609     // the null class loader metaspace (class and data virtual space managers),
3610     // and since new humongous chunks are linked at the head it will never
3611     // point to the tail of the humongous chunks list.
3612     new_chunk->set_next(chunks_in_use(HumongousIndex));
3613     set_chunks_in_use(HumongousIndex, new_chunk);
3614 
3615     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3616   }
3617 
3618   // Add to the running sum of capacity
3619   inc_size_metrics(new_chunk->word_size());
3620 
3621   assert(new_chunk->is_empty(), "Not ready for reuse");
3622   Log(gc, metaspace, freelist) log;
3623   if (log.is_trace()) {
3624     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
3625     ResourceMark rm;
3626     LogStream ls(log.trace());
3627     new_chunk->print_on(&ls);
3628     chunk_manager()->locked_print_free_chunks(&ls);
3629   }
3630 }
3631 
3632 void SpaceManager::retire_current_chunk() {
3633   if (current_chunk() != NULL) {
3634     size_t remaining_words = current_chunk()->free_word_size();
3635     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3636       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3637       deallocate(ptr, remaining_words);
3638       inc_used_metrics(remaining_words);
3639     }
3640   }
3641 }
3642 
3643 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3644   // Get a chunk from the chunk freelist
3645   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3646 
3647   if (next == NULL) {
3648     next = vs_list()->get_new_chunk(chunk_word_size,
3649                                     medium_chunk_bunch());
3650   }
3651 
3652   Log(gc, metaspace, alloc) log;
3653   if (log.is_debug() && next != NULL &&
3654       SpaceManager::is_humongous(next->word_size())) {
3655     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3656   }
3657 
3658   return next;
3659 }
3660 
3661 MetaWord* SpaceManager::allocate(size_t word_size) {
3662   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3663   size_t raw_word_size = get_allocation_word_size(word_size);
3664   BlockFreelist* fl =  block_freelists();
3665   MetaWord* p = NULL;
3666   // Allocation from the dictionary is expensive in the sense that
3667   // the dictionary has to be searched for a size.  Don't allocate
3668   // from the dictionary until it starts to get fat.  Is this
3669   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3670   // for allocations.  Do some profiling.  JJJ
3671   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3672     p = fl->get_block(raw_word_size);
3673   }
3674   if (p == NULL) {
3675     p = allocate_work(raw_word_size);
3676   }
3677 
3678   return p;
3679 }
3680 
3681 // Returns the address of space allocated for "word_size".
3682 // This method does not know about blocks (Metablocks).
3683 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3684   assert_lock_strong(_lock);
3685 #ifdef ASSERT
3686   if (Metadebug::test_metadata_failure()) {
3687     return NULL;
3688   }
3689 #endif
3690   // Is there space in the current chunk?
3691   MetaWord* result = NULL;
3692 
3693   if (current_chunk() != NULL) {
3694     result = current_chunk()->allocate(word_size);
3695   }
3696 
3697   if (result == NULL) {
3698     result = grow_and_allocate(word_size);
3699   }
3700 
3701   if (result != NULL) {
3702     inc_used_metrics(word_size);
3703     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3704            "Head of the list is being allocated");
3705   }
3706 
3707   return result;
3708 }
3709 
3710 void SpaceManager::verify() {
3711   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3712     Metachunk* curr = chunks_in_use(i);
3713     while (curr != NULL) {
3714       DEBUG_ONLY(do_verify_chunk(curr);)
3715       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3716       curr = curr->next();
3717     }
3718   }
3719 }
3720 
3721 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3722   assert(is_humongous(chunk->word_size()) ||
3723          chunk->word_size() == medium_chunk_size() ||
3724          chunk->word_size() == small_chunk_size() ||
3725          chunk->word_size() == specialized_chunk_size(),
3726          "Chunk size is wrong");
3727   return;
3728 }
3729 
3730 #ifdef ASSERT
3731 void SpaceManager::verify_allocated_blocks_words() {
3732   // Verification is only guaranteed at a safepoint.
3733   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3734     "Verification can fail if the applications is running");
3735   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3736          "allocation total is not consistent " SIZE_FORMAT
3737          " vs " SIZE_FORMAT,
3738          allocated_blocks_words(), sum_used_in_chunks_in_use());
3739 }
3740 
3741 #endif
3742 
3743 void SpaceManager::dump(outputStream* const out) const {
3744   size_t curr_total = 0;
3745   size_t waste = 0;
3746   uint i = 0;
3747   size_t used = 0;
3748   size_t capacity = 0;
3749 
3750   // Add up statistics for all chunks in this SpaceManager.
3751   for (ChunkIndex index = ZeroIndex;
3752        index < NumberOfInUseLists;
3753        index = next_chunk_index(index)) {
3754     for (Metachunk* curr = chunks_in_use(index);
3755          curr != NULL;
3756          curr = curr->next()) {
3757       out->print("%d) ", i++);
3758       curr->print_on(out);
3759       curr_total += curr->word_size();
3760       used += curr->used_word_size();
3761       capacity += curr->word_size();
3762       waste += curr->free_word_size() + curr->overhead();
3763     }
3764   }
3765 
3766   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3767     if (block_freelists() != NULL) block_freelists()->print_on(out);
3768   }
3769 
3770   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3771   // Free space isn't wasted.
3772   waste -= free;
3773 
3774   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3775                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3776                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3777 }
3778 
3779 // MetaspaceUtils
3780 
3781 
3782 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3783 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
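     // Descriptive note: both arrays are indexed by Metaspace::MetadataType (non-class
     // and class). _used_words is volatile and updated with Atomic operations because
     // it can be changed concurrently without the expand_lock() (see inc_used() and
     // dec_used() below); _capacity_words is only updated under the expand_lock().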
3784 
3785 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3786   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3787   return list == NULL ? 0 : list->free_bytes();
3788 }
3789 
3790 size_t MetaspaceUtils::free_bytes() {
3791   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3792 }
3793 
3794 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3795   assert_lock_strong(SpaceManager::expand_lock());
3796   assert(words <= capacity_words(mdtype),
3797          "About to decrement below 0: words " SIZE_FORMAT
3798          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3799          words, mdtype, capacity_words(mdtype));
3800   _capacity_words[mdtype] -= words;
3801 }
3802 
3803 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3804   assert_lock_strong(SpaceManager::expand_lock());
3805   // Needs to be atomic
3806   _capacity_words[mdtype] += words;
3807 }
3808 
3809 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3810   assert(words <= used_words(mdtype),
3811          "About to decrement below 0: words " SIZE_FORMAT
3812          " is greater than _used_words[%u] " SIZE_FORMAT,
3813          words, mdtype, used_words(mdtype));
3814   // For CMS deallocation of the Metaspaces occurs during the
3815   // sweep which is a concurrent phase.  Protection by the expand_lock()
3816   // is not enough since allocation is on a per Metaspace basis
3817   // and protected by the Metaspace lock.
3818   Atomic::sub(words, &_used_words[mdtype]);
3819 }
3820 
3821 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3822   // _used_words tracks allocations for
3823   // each piece of metadata.  Those allocations are
3824   // generally done concurrently by different application
3825   // threads so must be done atomically.
3826   Atomic::add(words, &_used_words[mdtype]);
3827 }
3828 
3829 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3830   size_t used = 0;
3831   ClassLoaderDataGraphMetaspaceIterator iter;
3832   while (iter.repeat()) {
3833     ClassLoaderMetaspace* msp = iter.get_next();
3834     // Sum allocated_blocks_words for each metaspace
3835     if (msp != NULL) {
3836       used += msp->used_words_slow(mdtype);
3837     }
3838   }
3839   return used * BytesPerWord;
3840 }
3841 
3842 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3843   size_t free = 0;
3844   ClassLoaderDataGraphMetaspaceIterator iter;
3845   while (iter.repeat()) {
3846     ClassLoaderMetaspace* msp = iter.get_next();
3847     if (msp != NULL) {
3848       free += msp->free_words_slow(mdtype);
3849     }
3850   }
3851   return free * BytesPerWord;
3852 }
3853 
3854 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3855   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3856     return 0;
3857   }
3858   // Don't count the space in the freelists.  That space will be
3859   // added to the capacity calculation as needed.
3860   size_t capacity = 0;
3861   ClassLoaderDataGraphMetaspaceIterator iter;
3862   while (iter.repeat()) {
3863     ClassLoaderMetaspace* msp = iter.get_next();
3864     if (msp != NULL) {
3865       capacity += msp->capacity_words_slow(mdtype);
3866     }
3867   }
3868   return capacity * BytesPerWord;
3869 }
3870 
3871 size_t MetaspaceUtils::capacity_bytes_slow() {
3872 #ifdef PRODUCT
3873   // Use capacity_bytes() in PRODUCT instead of this function.
3874   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3875 #endif
3876   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3877   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3878   assert(capacity_bytes() == class_capacity + non_class_capacity,
3879          "bad accounting: capacity_bytes() " SIZE_FORMAT
3880          " class_capacity + non_class_capacity " SIZE_FORMAT
3881          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3882          capacity_bytes(), class_capacity + non_class_capacity,
3883          class_capacity, non_class_capacity);
3884 
3885   return class_capacity + non_class_capacity;
3886 }
3887 
3888 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3889   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3890   return list == NULL ? 0 : list->reserved_bytes();
3891 }
3892 
3893 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3894   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3895   return list == NULL ? 0 : list->committed_bytes();
3896 }
3897 
3898 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3899 
3900 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3901   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3902   if (chunk_manager == NULL) {
3903     return 0;
3904   }
3905   chunk_manager->slow_verify();
3906   return chunk_manager->free_chunks_total_words();
3907 }
3908 
3909 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3910   return free_chunks_total_words(mdtype) * BytesPerWord;
3911 }
3912 
3913 size_t MetaspaceUtils::free_chunks_total_words() {
3914   return free_chunks_total_words(Metaspace::ClassType) +
3915          free_chunks_total_words(Metaspace::NonClassType);
3916 }
3917 
3918 size_t MetaspaceUtils::free_chunks_total_bytes() {
3919   return free_chunks_total_words() * BytesPerWord;
3920 }
3921 
3922 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3923   return Metaspace::get_chunk_manager(mdtype) != NULL;
3924 }
3925 
3926 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3927   if (!has_chunk_free_list(mdtype)) {
3928     return MetaspaceChunkFreeListSummary();
3929   }
3930 
3931   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3932   return cm->chunk_free_list_summary();
3933 }
3934 
3935 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3936   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3937                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3938 }
3939 
3940 void MetaspaceUtils::print_on(outputStream* out) {
3941   Metaspace::MetadataType nct = Metaspace::NonClassType;
3942 
3943   out->print_cr(" Metaspace       "
3944                 "used "      SIZE_FORMAT "K, "
3945                 "capacity "  SIZE_FORMAT "K, "
3946                 "committed " SIZE_FORMAT "K, "
3947                 "reserved "  SIZE_FORMAT "K",
3948                 used_bytes()/K,
3949                 capacity_bytes()/K,
3950                 committed_bytes()/K,
3951                 reserved_bytes()/K);
3952 
3953   if (Metaspace::using_class_space()) {
3954     Metaspace::MetadataType ct = Metaspace::ClassType;
3955     out->print_cr("  class space    "
3956                   "used "      SIZE_FORMAT "K, "
3957                   "capacity "  SIZE_FORMAT "K, "
3958                   "committed " SIZE_FORMAT "K, "
3959                   "reserved "  SIZE_FORMAT "K",
3960                   used_bytes(ct)/K,
3961                   capacity_bytes(ct)/K,
3962                   committed_bytes(ct)/K,
3963                   reserved_bytes(ct)/K);
3964   }
3965 }
3966 
3967 // Print information for class space and data space separately.
3968 // This is almost the same as above.
3969 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3970   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3971   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3972   size_t used_bytes = used_bytes_slow(mdtype);
3973   size_t free_bytes = free_bytes_slow(mdtype);
3974   size_t used_and_free = used_bytes + free_bytes +
3975                            free_chunks_capacity_bytes;
3976   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3977              "K + unused in chunks " SIZE_FORMAT "K  + "
3978              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3979              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3980              used_bytes / K,
3981              free_bytes / K,
3982              free_chunks_capacity_bytes / K,
3983              used_and_free / K,
3984              capacity_bytes / K);
3985   // Accounting can only be correct if we got the values during a safepoint
3986   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3987 }
3988 
3989 // Print total fragmentation for class metaspaces
3990 void MetaspaceUtils::print_class_waste(outputStream* out) {
3991   assert(Metaspace::using_class_space(), "class metaspace not used");
3992   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3993   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3994   ClassLoaderDataGraphMetaspaceIterator iter;
3995   while (iter.repeat()) {
3996     ClassLoaderMetaspace* msp = iter.get_next();
3997     if (msp != NULL) {
3998       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
3999       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4000       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4001       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
4002       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4003       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
4004       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4005     }
4006   }
4007   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4008                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4009                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4010                 "large count " SIZE_FORMAT,
4011                 cls_specialized_count, cls_specialized_waste,
4012                 cls_small_count, cls_small_waste,
4013                 cls_medium_count, cls_medium_waste, cls_humongous_count);
4014 }
4015 
4016 // Print total fragmentation for data and class metaspaces separately
4017 void MetaspaceUtils::print_waste(outputStream* out) {
4018   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
4019   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
4020 
4021   ClassLoaderDataGraphMetaspaceIterator iter;
4022   while (iter.repeat()) {
4023     ClassLoaderMetaspace* msp = iter.get_next();
4024     if (msp != NULL) {
4025       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4026       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4027       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4028       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
4029       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4030       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4031       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4032     }
4033   }
4034   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4035   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4036                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4037                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4038                         "large count " SIZE_FORMAT,
4039              specialized_count, specialized_waste, small_count,
4040              small_waste, medium_count, medium_waste, humongous_count);
4041   if (Metaspace::using_class_space()) {
4042     print_class_waste(out);
4043   }
4044 }
4045 
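     // Simple accumulator for capacity/used/free/waste byte counts, used by the
     // per-class-loader printing code below.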
4046 class MetadataStats {
4047 private:
4048   size_t _capacity;
4049   size_t _used;
4050   size_t _free;
4051   size_t _waste;
4052 
4053 public:
4054   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4055   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4056   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4057 
4058   void add(const MetadataStats& stats) {
4059     _capacity += stats.capacity();
4060     _used += stats.used();
4061     _free += stats.free();
4062     _waste += stats.waste();
4063   }
4064 
4065   size_t capacity() const { return _capacity; }
4066   size_t used() const     { return _used; }
4067   size_t free() const     { return _free; }
4068   size_t waste() const    { return _waste; }
4069 
4070   void print_on(outputStream* out, size_t scale) const;
4071 };
4072 
4073 
4074 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4075   const char* unit = scale_unit(scale);
4076   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4077     (float)capacity() / scale, unit,
4078     (float)used() / scale, unit,
4079     (float)free() / scale, unit,
4080     (float)waste() / scale, unit);
4081 }
4082 
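     // CLDClosure that prints per-class-loader metaspace statistics (metadata and,
     // if enabled, class space) and accumulates totals; the summary is printed
     // when the closure is destroyed.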
4083 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4084 private:
4085   outputStream*  _out;
4086   size_t         _scale;
4087 
4088   size_t         _total_count;
4089   MetadataStats  _total_metadata;
4090   MetadataStats  _total_class;
4091 
4092   size_t         _total_anon_count;
4093   MetadataStats  _total_anon_metadata;
4094   MetadataStats  _total_anon_class;
4095 
4096 public:
4097   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4098   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4099 
4100   ~PrintCLDMetaspaceInfoClosure() {
4101     print_summary();
4102   }
4103 
4104   void do_cld(ClassLoaderData* cld) {
4105     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4106 
4107     if (cld->is_unloading()) return;
4108     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4109     if (msp == NULL) {
4110       return;
4111     }
4112 
4113     bool anonymous = false;
4114     if (cld->is_anonymous()) {
4115       _out->print_cr("ClassLoader: for anonymous class");
4116       anonymous = true;
4117     } else {
4118       ResourceMark rm;
4119       _out->print_cr("ClassLoader: %s", cld->loader_name());
4120     }
4121 
4122     print_metaspace(msp, anonymous);
4123     _out->cr();
4124   }
4125 
4126 private:
4127   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4128   void print_summary() const;
4129 };
4130 
4131 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4132   assert(msp != NULL, "Sanity");
4133   SpaceManager* vsm = msp->vsm();
4135 
4136   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4137   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4138   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4139   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4140 
4141   _total_count ++;
4142   MetadataStats metadata_stats(capacity, used, free, waste);
4143   _total_metadata.add(metadata_stats);
4144 
4145   if (anonymous) {
4146     _total_anon_count ++;
4147     _total_anon_metadata.add(metadata_stats);
4148   }
4149 
4150   _out->print("  Metadata   ");
4151   metadata_stats.print_on(_out, _scale);
4152 
4153   if (Metaspace::using_class_space()) {
4154     vsm = msp->class_vsm();
4155 
4156     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4157     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4158     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4159     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4160 
4161     MetadataStats class_stats(capacity, used, free, waste);
4162     _total_class.add(class_stats);
4163 
4164     if (anonymous) {
4165       _total_anon_class.add(class_stats);
4166     }
4167 
4168     _out->print("  Class data ");
4169     class_stats.print_on(_out, _scale);
4170   }
4171 }
4172 
4173 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4175   _out->cr();
4176   _out->print_cr("Summary:");
4177 
4178   MetadataStats total;
4179   total.add(_total_metadata);
4180   total.add(_total_class);
4181 
4182   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4183   total.print_on(_out, _scale);
4184 
4185   _out->print("                    Metadata ");
4186   _total_metadata.print_on(_out, _scale);
4187 
4188   if (Metaspace::using_class_space()) {
4189     _out->print("                  Class data ");
4190     _total_class.print_on(_out, _scale);
4191   }
4192   _out->cr();
4193 
4194   MetadataStats total_anon;
4195   total_anon.add(_total_anon_metadata);
4196   total_anon.add(_total_anon_class);
4197 
4198   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4199   total_anon.print_on(_out, _scale);
4200 
4201   _out->print("                    Metadata ");
4202   _total_anon_metadata.print_on(_out, _scale);
4203 
4204   if (Metaspace::using_class_space()) {
4205     _out->print("                  Class data ");
4206     _total_anon_class.print_on(_out, _scale);
4207   }
4208 }
4209 
4210 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4211   const char* unit = scale_unit(scale);
4212   out->print_cr("Metaspaces:");
4213   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4214     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4215     committed_bytes(Metaspace::NonClassType) / scale, unit);
4216   if (Metaspace::using_class_space()) {
4217     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4218     reserved_bytes(Metaspace::ClassType) / scale, unit,
4219     committed_bytes(Metaspace::ClassType) / scale, unit);
4220   }
4221 
4222   out->cr();
4223   ChunkManager::print_all_chunkmanagers(out, scale);
4224 
4225   out->cr();
4226   out->print_cr("Per-classloader metadata:");
4227   out->cr();
4228 
4229   PrintCLDMetaspaceInfoClosure cl(out, scale);
4230   ClassLoaderDataGraph::cld_do(&cl);
4231 }
4232 
4233 
4234 // Dump global metaspace usage and chunk waste information for all class loaders.
4235 void MetaspaceUtils::dump(outputStream* out) {
4236   out->print_cr("All Metaspace:");
4237   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4238   out->print("class space: "); print_on(out, Metaspace::ClassType);
4239   print_waste(out);
4240 }
4241 
4242 // Prints an ASCII representation of the given space.
4243 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4244   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4245   const bool for_class = (mdtype == Metaspace::ClassType);
4246   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4247   if (vsl != NULL) {
4248     if (for_class) {
4249       if (!Metaspace::using_class_space()) {
4250         out->print_cr("No Class Space.");
4251         return;
4252       }
4253       out->print_raw("---- Metaspace Map (Class Space) ----");
4254     } else {
4255       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4256     }
4257     // Print legend:
4258     out->cr();
4259     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4260     out->cr();
4262     vsl->print_map(out);
4263     out->cr();
4264   }
4265 }
4266 
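     // Verify the free lists of the non-class chunk manager and, if a compressed
     // class space is in use, the class chunk manager.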
4267 void MetaspaceUtils::verify_free_chunks() {
4268   Metaspace::chunk_manager_metadata()->verify();
4269   if (Metaspace::using_class_space()) {
4270     Metaspace::chunk_manager_class()->verify();
4271   }
4272 }
4273 
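     // Debug-only: cross-check the running capacity counters against the values
     // obtained by the slow walk over all class loader metaspaces.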
4274 void MetaspaceUtils::verify_capacity() {
4275 #ifdef ASSERT
4276   size_t running_sum_capacity_bytes = capacity_bytes();
4277   // For purposes of the running sum of capacity, verify against capacity
4278   size_t capacity_in_use_bytes = capacity_bytes_slow();
4279   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4280          "capacity_bytes() " SIZE_FORMAT
4281          " capacity_bytes_slow() " SIZE_FORMAT,
4282          running_sum_capacity_bytes, capacity_in_use_bytes);
4283   for (Metaspace::MetadataType i = Metaspace::ClassType;
4284        i < Metaspace::MetadataTypeCount;
4285        i = (Metaspace::MetadataType)(i + 1)) {
4286     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4287     assert(capacity_bytes(i) == capacity_in_use_bytes,
4288            "capacity_bytes(%u) " SIZE_FORMAT
4289            " capacity_bytes_slow(%u) " SIZE_FORMAT,
4290            i, capacity_bytes(i), i, capacity_in_use_bytes);
4291   }
4292 #endif
4293 }
4294 
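     // Debug-only: same cross-check as verify_capacity(), but for the used counters.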
4295 void MetaspaceUtils::verify_used() {
4296 #ifdef ASSERT
4297   size_t running_sum_used_bytes = used_bytes();
4298   // For purposes of the running sum of used, verify against used
4299   size_t used_in_use_bytes = used_bytes_slow();
4300   assert(running_sum_used_bytes == used_in_use_bytes,
4301          "used_bytes() " SIZE_FORMAT
4302          " used_bytes_slow() " SIZE_FORMAT,
4303          running_sum_used_bytes, used_in_use_bytes);
4304   for (Metaspace::MetadataType i = Metaspace::ClassType;
4305        i < Metaspace::MetadataTypeCount;
4306        i = (Metaspace::MetadataType)(i + 1)) {
4307     size_t used_in_use_bytes = used_bytes_slow(i);
4308     assert(used_bytes(i) == used_in_use_bytes,
4309            "used_bytes(%u) " SIZE_FORMAT
4310            " used_bytes_slow(%u) " SIZE_FORMAT,
4311            i, used_bytes(i), i, used_in_use_bytes);
4312   }
4313 #endif
4314 }
4315 
4316 void MetaspaceUtils::verify_metrics() {
4317   verify_capacity();
4318   verify_used();
4319 }
4320 
4321 
4322 // Metaspace methods
4323 
4324 size_t Metaspace::_first_chunk_word_size = 0;
4325 size_t Metaspace::_first_class_chunk_word_size = 0;
4326 
4327 size_t Metaspace::_commit_alignment = 0;
4328 size_t Metaspace::_reserve_alignment = 0;
4329 
4330 VirtualSpaceList* Metaspace::_space_list = NULL;
4331 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4332 
4333 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4334 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4335 
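     // Multiplier applied to the first chunk size when reserving the initial
     // non-class virtual space, and to InitialBootClassLoaderMetaspaceSize in the
     // ergonomic minimum-size check below.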
4336 #define VIRTUALSPACEMULTIPLIER 2
4337 
4338 #ifdef _LP64
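     // 4G: one past the largest offset a narrow klass pointer can encode
     // without a shift.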
4339 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4340 
4341 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4342   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4343   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4344   // narrow_klass_base is the lower of the metaspace base and the cds base
4345   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4346   // between the lower base and higher address.
4347   address lower_base;
4348   address higher_address;
4349 #if INCLUDE_CDS
4350   if (UseSharedSpaces) {
4351     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4352                           (address)(metaspace_base + compressed_class_space_size()));
4353     lower_base = MIN2(metaspace_base, cds_base);
4354   } else
4355 #endif
4356   {
4357     higher_address = metaspace_base + compressed_class_space_size();
4358     lower_base = metaspace_base;
4359 
4360     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
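         // With the usual 8-byte klass alignment (LogKlassAlignmentInBytes == 3),
         // this evaluates to 4G << 3 == 32G.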
4361     // If compressed class space fits in lower 32G, we don't need a base.
4362     if (higher_address <= (address)klass_encoding_max) {
4363       lower_base = 0; // Effectively lower base is zero.
4364     }
4365   }
4366 
4367   Universe::set_narrow_klass_base(lower_base);
4368 
4369   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4370   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4371   // how the dump-time narrow_klass_shift is set. Although CDS could also
4372   // work in zero-shift mode, it uses LogKlassAlignmentInBytes for the
4373   // klass shift to stay consistent with AOT, so archived Java heap objects
4374   // can be used at the same time as AOT code.
4375   if (!UseSharedSpaces
4376       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4377     Universe::set_narrow_klass_shift(0);
4378   } else {
4379     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4380   }
4381   AOTLoader::set_narrow_klass_shift();
4382 }
4383 
4384 #if INCLUDE_CDS
4385 // Return TRUE if the specified metaspace_base and cds_base are close enough
4386 // to work with compressed klass pointers.
4387 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4388   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4389   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4390   address lower_base = MIN2((address)metaspace_base, cds_base);
4391   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4392                                 (address)(metaspace_base + compressed_class_space_size()));
4393   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4394 }
4395 #endif
4396 
4397 // Try to allocate the metaspace at the requested addr.
4398 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4399   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
4400   assert(using_class_space(), "called improperly");
4401   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4402   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4403          "Metaspace size is too big");
4404   assert_is_aligned(requested_addr, _reserve_alignment);
4405   assert_is_aligned(cds_base, _reserve_alignment);
4406   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4407 
4408   // Don't use large pages for the class space.
4409   bool large_pages = false;
4410 
4411 #if !(defined(AARCH64) || defined(AIX))
4412   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4413                                              _reserve_alignment,
4414                                              large_pages,
4415                                              requested_addr);
4416 #else // AARCH64 || AIX
4417   ReservedSpace metaspace_rs;
4418 
4419   // Our compressed klass pointers may fit nicely into the lower 32
4420   // bits.
4421   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4422     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4423                                  _reserve_alignment,
4424                                  large_pages,
4425                                  requested_addr);
4426   }
4427 
4428   if (! metaspace_rs.is_reserved()) {
4429     // Aarch64: Try to align metaspace so that we can decode a compressed
4430     // klass with a single MOVK instruction.  We can do this iff the
4431     // compressed class base is a multiple of 4G.
4432     // Aix: Search for a place where we can find memory. If we need to load
4433     // the base, 4G alignment is helpful, too.
4434     size_t increment = AARCH64_ONLY(4*)G;
4435     for (char *a = align_up(requested_addr, increment);
4436          a < (char*)(1024*G);
4437          a += increment) {
4438       if (a == (char *)(32*G)) {
4439         // Go faster from here on. Zero-based is no longer possible.
4440         increment = 4*G;
4441       }
4442 
4443 #if INCLUDE_CDS
4444       if (UseSharedSpaces
4445           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4446         // We failed to find an aligned base that will reach.  Fall
4447         // back to using our requested addr.
4448         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4449                                      _reserve_alignment,
4450                                      large_pages,
4451                                      requested_addr);
4452         break;
4453       }
4454 #endif
4455 
4456       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4457                                    _reserve_alignment,
4458                                    large_pages,
4459                                    a);
4460       if (metaspace_rs.is_reserved())
4461         break;
4462     }
4463   }
4464 
4465 #endif // AARCH64 || AIX
4466 
4467   if (!metaspace_rs.is_reserved()) {
4468 #if INCLUDE_CDS
4469     if (UseSharedSpaces) {
4470       size_t increment = align_up(1*G, _reserve_alignment);
4471 
4472       // Keep trying to allocate the metaspace, increasing the requested_addr
4473       // by 1GB each time, until we reach an address that will no longer allow
4474       // use of CDS with compressed klass pointers.
4475       char *addr = requested_addr;
4476       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4477              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4478         addr = addr + increment;
4479         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4480                                      _reserve_alignment, large_pages, addr);
4481       }
4482     }
4483 #endif
4484     // If no successful allocation then try to allocate the space anywhere.  If
4485     // that fails then OOM doom.  At this point we cannot try allocating the
4486     // metaspace as if UseCompressedClassPointers is off because too much
4487     // initialization has happened that depends on UseCompressedClassPointers.
4488     // So, UseCompressedClassPointers cannot be turned off at this point.
4489     if (!metaspace_rs.is_reserved()) {
4490       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4491                                    _reserve_alignment, large_pages);
4492       if (!metaspace_rs.is_reserved()) {
4493         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4494                                               compressed_class_space_size()));
4495       }
4496     }
4497   }
4498 
4499   // If we got here then the metaspace got allocated.
4500   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4501 
4502 #if INCLUDE_CDS
4503   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4504   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4505     FileMapInfo::stop_sharing_and_unmap(
4506         "Could not allocate metaspace at a compatible address");
4507   }
4508 #endif
4509   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4510                                   UseSharedSpaces ? (address)cds_base : 0);
4511 
4512   initialize_class_space(metaspace_rs);
4513 
4514   LogTarget(Trace, gc, metaspace) lt;
4515   if (lt.is_enabled()) {
4516     ResourceMark rm;
4517     LogStream ls(lt);
4518     print_compressed_class_space(&ls, requested_addr);
4519   }
4520 }
4521 
4522 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4523   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4524                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4525   if (_class_space_list != NULL) {
4526     address base = (address)_class_space_list->current_virtual_space()->bottom();
4527     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4528                  compressed_class_space_size(), p2i(base));
4529     if (requested_addr != 0) {
4530       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4531     }
4532     st->cr();
4533   }
4534 }
4535 
4536 // For UseCompressedClassPointers the class space is reserved above the top of
4537 // the Java heap.  The argument passed in is at the base of the compressed space.
4538 void Metaspace::initialize_class_space(ReservedSpace rs) {
4539   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4540   assert(rs.size() >= CompressedClassSpaceSize,
4541          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4542   assert(using_class_space(), "Must be using class space");
4543   _class_space_list = new VirtualSpaceList(rs);
4544   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4545 
4546   if (!_class_space_list->initialization_succeeded()) {
4547     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4548   }
4549 }
4550 
4551 #endif
4552 
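     // Ergonomics: derive the commit and reserve alignments from the page size
     // (large pages if enabled for metaspace) and align the metaspace sizing
     // flags to them.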
4553 void Metaspace::ergo_initialize() {
4554   if (DumpSharedSpaces) {
4555     // Using large pages when dumping the shared archive is currently not implemented.
4556     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4557   }
4558 
4559   size_t page_size = os::vm_page_size();
4560   if (UseLargePages && UseLargePagesInMetaspace) {
4561     page_size = os::large_page_size();
4562   }
4563 
4564   _commit_alignment  = page_size;
4565   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4566 
4567   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
4568   // lose the record of whether MaxMetaspaceSize was set on the command line.
4569   // This information is needed later to conform to the specification of the
4570   // java.lang.management.MemoryUsage API.
4571   //
4572   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4573   // globals.hpp to the aligned value, but this is not possible, since the
4574   // alignment depends on other flags being parsed.
4575   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4576 
4577   if (MetaspaceSize > MaxMetaspaceSize) {
4578     MetaspaceSize = MaxMetaspaceSize;
4579   }
4580 
4581   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4582 
4583   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4584 
4585   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4586   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4587 
4588   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4589 
4590   // Initial virtual space size will be calculated at global_initialize()
4591   size_t min_metaspace_sz =
4592       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4593   if (UseCompressedClassPointers) {
4594     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4595       if (min_metaspace_sz >= MaxMetaspaceSize) {
4596         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4597       } else {
4598         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4599                       MaxMetaspaceSize - min_metaspace_sz);
4600       }
4601     }
4602   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4603     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4604                   min_metaspace_sz);
4605   }
4606 
4607   set_compressed_class_space_size(CompressedClassSpaceSize);
4608 }
4609 
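     // One-time global initialization: set up CDS dump/runtime spaces or, without
     // CDS, the compressed class space (64-bit only); compute the initial chunk
     // sizes; and create the non-class virtual space list and chunk manager.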
4610 void Metaspace::global_initialize() {
4611   MetaspaceGC::initialize();
4612 
4613 #if INCLUDE_CDS
4614   if (DumpSharedSpaces) {
4615     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4616   } else if (UseSharedSpaces) {
4617     // If any of the archived space fails to map, UseSharedSpaces
4618     // is reset to false. Fall through to the
4619     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4620     // metaspace.
4621     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4622   }
4623 
4624   if (!DumpSharedSpaces && !UseSharedSpaces)
4625 #endif // INCLUDE_CDS
4626   {
4627 #ifdef _LP64
4628     if (using_class_space()) {
4629       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4630       allocate_metaspace_compressed_klass_ptrs(base, 0);
4631     }
4632 #endif // _LP64
4633   }
4634 
4635   // Initialize these before initializing the VirtualSpaceList
4636   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4637   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4638   // Make the first class chunk bigger than a medium chunk so it's not put
4639   // on the medium chunk list.  The next chunk will be small and progress
4640   // from there.  This size was calculated by measuring a -version run.
4641   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4642                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4643   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4644   // Arbitrarily set the initial virtual space to a multiple
4645   // of the boot class loader size.
4646   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4647   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4648 
4649   // Initialize the list of virtual spaces.
4650   _space_list = new VirtualSpaceList(word_size);
4651   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4652 
4653   if (!_space_list->initialization_succeeded()) {
4654     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4655   }
4656 
4657   _tracer = new MetaspaceTracer();
4658 }
4659 
4660 void Metaspace::post_initialize() {
4661   MetaspaceGC::post_initialize();
4662 }
4663 
4664 void Metaspace::verify_global_initialization() {
4665   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4666   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4667 
4668   if (using_class_space()) {
4669     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4670     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4671   }
4672 }
4673 
4674 size_t Metaspace::align_word_size_up(size_t word_size) {
4675   size_t byte_size = word_size * wordSize;
4676   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4677 }
4678 
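     // Allocate metadata of the given word size on behalf of a class loader. If
     // the first attempt fails, a GC may be triggered (once bootstrapping is
     // complete) and the allocation retried; a persistent failure is reported as
     // a metaspace OOM.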
4679 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4680                               MetaspaceObj::Type type, TRAPS) {
4681   assert(!_frozen, "sanity");
4682   if (HAS_PENDING_EXCEPTION) {
4683     assert(false, "Should not allocate with exception pending");
4684     return NULL;  // caller does a CHECK_NULL too
4685   }
4686 
4687   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4688         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4689 
4690   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4691 
4692   // Try to allocate metadata.
4693   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4694 
4695   if (result == NULL) {
4696     if (DumpSharedSpaces && THREAD->is_VM_thread()) {
4697       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4698           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4699       vm_exit(1);
4700     }
4701 
4702     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4703 
4704     // Allocation failed.
4705     if (is_init_completed()) {
4706       // Only start a GC if the bootstrapping has completed.
4707 
4708       // Try to clean out some memory and retry.
4709       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4710     }
4711   }
4712 
4713   if (result == NULL) {
4714     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4715   }
4716 
4717   // Zero initialize.
4718   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4719 
4720   return result;
4721 }
4722 
4723 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4724   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4725 
4726   // If result is still null, we are out of memory.
4727   Log(gc, metaspace, freelist) log;
4728   if (log.is_info()) {
4729     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4730              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4731     ResourceMark rm;
4732     if (log.is_debug()) {
4733       if (loader_data->metaspace_or_null() != NULL) {
4734         LogStream ls(log.debug());
4735         loader_data->print_value_on(&ls);
4736       }
4737     }
4738     LogStream ls(log.info());
4739     MetaspaceUtils::dump(&ls);
4740     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4741     ChunkManager::print_all_chunkmanagers(&ls);
4742   }
4743 
4744   bool out_of_compressed_class_space = false;
4745   if (is_class_space_allocation(mdtype)) {
4746     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4747     out_of_compressed_class_space =
4748       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4749       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4750       CompressedClassSpaceSize;
4751   }
4752 
4753   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4754   const char* space_string = out_of_compressed_class_space ?
4755     "Compressed class space" : "Metaspace";
4756 
4757   report_java_out_of_memory(space_string);
4758 
4759   if (JvmtiExport::should_post_resource_exhausted()) {
4760     JvmtiExport::post_resource_exhausted(
4761         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4762         space_string);
4763   }
4764 
4765   if (!is_init_completed()) {
4766     vm_exit_during_initialization("OutOfMemoryError", space_string);
4767   }
4768 
4769   if (out_of_compressed_class_space) {
4770     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4771   } else {
4772     THROW_OOP(Universe::out_of_memory_error_metaspace());
4773   }
4774 }
4775 
4776 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4777   switch (mdtype) {
4778     case Metaspace::ClassType: return "Class";
4779     case Metaspace::NonClassType: return "Metadata";
4780     default:
4781       assert(false, "Got bad mdtype: %d", (int) mdtype);
4782       return NULL;
4783   }
4784 }
4785 
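     // Purge unused virtual space of the given metadata type; delegates to
     // VirtualSpaceList::purge() with the matching chunk manager.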
4786 void Metaspace::purge(MetadataType mdtype) {
4787   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4788 }
4789 
4790 void Metaspace::purge() {
4791   MutexLockerEx cl(SpaceManager::expand_lock(),
4792                    Mutex::_no_safepoint_check_flag);
4793   purge(NonClassType);
4794   if (using_class_space()) {
4795     purge(ClassType);
4796   }
4797 }
4798 
4799 bool Metaspace::contains(const void* ptr) {
4800   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4801     return true;
4802   }
4803   return contains_non_shared(ptr);
4804 }
4805 
4806 bool Metaspace::contains_non_shared(const void* ptr) {
4807   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4808      return true;
4809   }
4810 
4811   return get_space_list(NonClassType)->contains(ptr);
4812 }
4813 
4814 // ClassLoaderMetaspace
4815 
4816 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4817   initialize(lock, type);
4818 }
4819 
4820 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4821   delete _vsm;
4822   if (Metaspace::using_class_space()) {
4823     delete _class_vsm;
4824   }
4825 }
4826 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4827   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4828   if (chunk != NULL) {
4829     // Add to this manager's list of chunks in use and current_chunk().
4830     get_space_manager(mdtype)->add_chunk(chunk, true);
4831   }
4832 }
4833 
4834 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4835   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4836 
4837   // Get a chunk from the chunk freelist
4838   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4839 
4840   if (chunk == NULL) {
4841     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4842                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4843   }
4844 
4845   return chunk;
4846 }
4847 
4848 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4849   Metaspace::verify_global_initialization();
4850 
4851   // Allocate SpaceManager for metadata objects.
4852   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4853 
4854   if (Metaspace::using_class_space()) {
4855     // Allocate SpaceManager for classes.
4856     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4857   }
4858 
4859   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4860 
4861   // Allocate chunk for metadata objects
4862   initialize_first_chunk(type, Metaspace::NonClassType);
4863 
4864   // Allocate chunk for class metadata objects
4865   if (Metaspace::using_class_space()) {
4866     initialize_first_chunk(type, Metaspace::ClassType);
4867   }
4868 }
4869 
4870 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4871   Metaspace::assert_not_frozen();
4872   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4873   if (Metaspace::is_class_space_allocation(mdtype)) {
4874     return  class_vsm()->allocate(word_size);
4875   } else {
4876     return  vsm()->allocate(word_size);
4877   }
4878 }
4879 
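     // Allocate after first attempting to raise the GC threshold (capacity
     // high-water mark) by the amount needed for this allocation.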
4880 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4881   Metaspace::assert_not_frozen();
4882   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4883   assert(delta_bytes > 0, "Must be");
4884 
4885   size_t before = 0;
4886   size_t after = 0;
4887   MetaWord* res;
4888   bool incremented;
4889 
4890   // Each thread increments the HWM at most once. Even if the thread fails to increment
4891   // the HWM, an allocation is still attempted. This is because another thread must then
4892   // have incremented the HWM and therefore the allocation might still succeed.
4893   do {
4894     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4895     res = allocate(word_size, mdtype);
4896   } while (!incremented && res == NULL);
4897 
4898   if (incremented) {
4899     Metaspace::tracer()->report_gc_threshold(before, after,
4900                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4901     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4902   }
4903 
4904   return res;
4905 }
4906 
4907 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4908   if (mdtype == Metaspace::ClassType) {
4909     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4910   } else {
4911     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4912   }
4913 }
4914 
4915 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4916   Metaspace::assert_not_frozen();
4917   if (mdtype == Metaspace::ClassType) {
4918     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4919   } else {
4920     return vsm()->sum_free_in_chunks_in_use();
4921   }
4922 }
4923 
4924 // Space capacity in the Metaspace.  It includes
4925 // space in the list of chunks from which allocations
4926 // have been made.  It does not include space in the global chunk freelist,
4927 // nor the space available in the block dictionary, since that space
4928 // is already counted in some chunk.
4929 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4930   if (mdtype == Metaspace::ClassType) {
4931     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4932   } else {
4933     return vsm()->sum_capacity_in_chunks_in_use();
4934   }
4935 }
4936 
4937 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4938   return used_words_slow(mdtype) * BytesPerWord;
4939 }
4940 
4941 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4942   return capacity_words_slow(mdtype) * BytesPerWord;
4943 }
4944 
4945 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4946   return vsm()->allocated_blocks_bytes() +
4947       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4948 }
4949 
4950 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4951   return vsm()->allocated_chunks_bytes() +
4952       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4953 }
4954 
4955 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4956   Metaspace::assert_not_frozen();
4957   assert(!SafepointSynchronize::is_at_safepoint()
4958          || Thread::current()->is_VM_thread(), "should be the VM thread");
4959 
4960   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4961 
4962   if (is_class && Metaspace::using_class_space()) {
4963     class_vsm()->deallocate(ptr, word_size);
4964   } else {
4965     vsm()->deallocate(ptr, word_size);
4966   }
4967 }
4968 
4969 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4970   assert(Metaspace::using_class_space(), "Has to use class space");
4971   return class_vsm()->calc_chunk_size(word_size);
4972 }
4973 
4974 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4975   // Print both class virtual space counts and metaspace.
4976   if (Verbose) {
4977     vsm()->print_on(out);
4978     if (Metaspace::using_class_space()) {
4979       class_vsm()->print_on(out);
4980     }
4981   }
4982 }
4983 
4984 void ClassLoaderMetaspace::verify() {
4985   vsm()->verify();
4986   if (Metaspace::using_class_space()) {
4987     class_vsm()->verify();
4988   }
4989 }
4990 
4991 void ClassLoaderMetaspace::dump(outputStream* const out) const {
4992   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4993   vsm()->dump(out);
4994   if (Metaspace::using_class_space()) {
4995     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4996     class_vsm()->dump(out);
4997   }
4998 }
4999 
5000 
5001 
5002 #ifdef ASSERT
5003 static void do_verify_chunk(Metachunk* chunk) {
5004   guarantee(chunk != NULL, "Sanity");
5005   // Verify the chunk itself; then verify that it is consistent with the
5006   // occupancy map of its containing node.
5007   chunk->verify();
5008   VirtualSpaceNode* const vsn = chunk->container();
5009   OccupancyMap* const ocmap = vsn->occupancy_map();
5010   ocmap->verify_for_chunk(chunk);
5011 }
5012 #endif
5013 
5014 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
5015   chunk->set_is_tagged_free(!inuse);
5016   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
5017   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
5018 }
5019 
5020 /////////////// Unit tests ///////////////
5021 
5022 #ifndef PRODUCT
5023 
5024 class TestMetaspaceUtilsTest : AllStatic {
5025  public:
5026   static void test_reserved() {
5027     size_t reserved = MetaspaceUtils::reserved_bytes();
5028 
5029     assert(reserved > 0, "assert");
5030 
5031     size_t committed  = MetaspaceUtils::committed_bytes();
5032     assert(committed <= reserved, "assert");
5033 
5034     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5035     assert(reserved_metadata > 0, "assert");
5036     assert(reserved_metadata <= reserved, "assert");
5037 
5038     if (UseCompressedClassPointers) {
5039       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5040       assert(reserved_class > 0, "assert");
5041       assert(reserved_class < reserved, "assert");
5042     }
5043   }
5044 
5045   static void test_committed() {
5046     size_t committed = MetaspaceUtils::committed_bytes();
5047 
5048     assert(committed > 0, "assert");
5049 
5050     size_t reserved  = MetaspaceUtils::reserved_bytes();
5051     assert(committed <= reserved, "assert");
5052 
5053     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5054     assert(committed_metadata > 0, "assert");
5055     assert(committed_metadata <= committed, "assert");
5056 
5057     if (UseCompressedClassPointers) {
5058       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5059       assert(committed_class > 0, "assert");
5060       assert(committed_class < committed, "assert");
5061     }
5062   }
5063 
5064   static void test_virtual_space_list_large_chunk() {
5065     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5066     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5067     // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
5068     // vm_allocation_granularity aligned on Windows.
5069     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5070     large_size += (os::vm_page_size()/BytesPerWord);
5071     vs_list->get_new_chunk(large_size, 0);
5072   }
5073 
5074   static void test() {
5075     test_reserved();
5076     test_committed();
5077     test_virtual_space_list_large_chunk();
5078   }
5079 };
5080 
5081 void TestMetaspaceUtils_test() {
5082   TestMetaspaceUtilsTest::test();
5083 }
5084 
5085 class TestVirtualSpaceNodeTest {
5086   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5087                                           size_t& num_small_chunks,
5088                                           size_t& num_specialized_chunks) {
5089     num_medium_chunks = words_left / MediumChunk;
5090     words_left = words_left % MediumChunk;
5091 
5092     num_small_chunks = words_left / SmallChunk;
5093     words_left = words_left % SmallChunk;
5094     // how many specialized chunks can we get?
5095     num_specialized_chunks = words_left / SpecializedChunk;
5096     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5097   }
5098 
5099  public:
5100   static void test() {
5101     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5102     const size_t vsn_test_size_words = MediumChunk  * 4;
5103     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5104 
5105     // The chunk sizes must be multiples of each other, or this will fail
5106     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5107     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5108 
5109     { // No committed memory in VSN
5110       ChunkManager cm(false);
5111       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5112       vsn.initialize();
5113       vsn.retire(&cm);
5114       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5115     }
5116 
5117     { // All of VSN is committed, half is used by chunks
5118       ChunkManager cm(false);
5119       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5120       vsn.initialize();
5121       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5122       vsn.get_chunk_vs(MediumChunk);
5123       vsn.get_chunk_vs(MediumChunk);
5124       vsn.retire(&cm);
5125       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5126       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5127     }
5128 
5129     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5130     // This doesn't work for systems with vm_page_size >= 16K.
5131     if (page_chunks < MediumChunk) {
5132       // 4 pages of VSN is committed, some is used by chunks
5133       ChunkManager cm(false);
5134       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5135 
5136       vsn.initialize();
5137       vsn.expand_by(page_chunks, page_chunks);
5138       vsn.get_chunk_vs(SmallChunk);
5139       vsn.get_chunk_vs(SpecializedChunk);
5140       vsn.retire(&cm);
5141 
5142       // committed - used = words left to retire
5143       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5144 
5145       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5146       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5147 
5148       assert(num_medium_chunks == 0, "should not get any medium chunks");
5149       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5150       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5151     }
5152 
5153     { // Half of VSN is committed, a humongous chunk is used
5154       ChunkManager cm(false);
5155       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5156       vsn.initialize();
5157       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5158       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5159       vsn.retire(&cm);
5160 
5161       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5162       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5163       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5164 
5165       assert(num_medium_chunks == 0, "should not get any medium chunks");
5166       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5167       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5168     }
5169 
5170   }
5171 
5172 #define assert_is_available_positive(word_size) \
5173   assert(vsn.is_available(word_size), \
5174          #word_size ": " PTR_FORMAT " bytes were not available in " \
5175          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5176          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5177 
5178 #define assert_is_available_negative(word_size) \
5179   assert(!vsn.is_available(word_size), \
5180          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5181          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5182          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5183 
5184   static void test_is_available_positive() {
5185     // Reserve some memory.
5186     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5187     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5188 
5189     // Commit some memory.
5190     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5191     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5192     assert(expanded, "Failed to commit");
5193 
5194     // Check that is_available accepts the committed size.
5195     assert_is_available_positive(commit_word_size);
5196 
5197     // Check that is_available accepts half the committed size.
5198     size_t expand_word_size = commit_word_size / 2;
5199     assert_is_available_positive(expand_word_size);
5200   }
5201 
5202   static void test_is_available_negative() {
5203     // Reserve some memory.
5204     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5205     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5206 
5207     // Commit some memory.
5208     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5209     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5210     assert(expanded, "Failed to commit");
5211 
5212     // Check that is_available doesn't accept a too large size.
5213     size_t two_times_commit_word_size = commit_word_size * 2;
5214     assert_is_available_negative(two_times_commit_word_size);
5215   }
5216 
5217   static void test_is_available_overflow() {
5218     // Reserve some memory.
5219     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5220     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5221 
5222     // Commit some memory.
5223     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5224     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5225     assert(expanded, "Failed to commit");
5226 
5227     // Calculate a size that will overflow the virtual space size.
5228     void* virtual_space_max = (void*)(uintptr_t)-1;
5229     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5230     size_t overflow_size = bottom_to_max + BytesPerWord;
5231     size_t overflow_word_size = overflow_size / BytesPerWord;
5232 
5233     // Check that is_available can handle the overflow.
5234     assert_is_available_negative(overflow_word_size);
5235   }
5236 
5237   static void test_is_available() {
5238     TestVirtualSpaceNodeTest::test_is_available_positive();
5239     TestVirtualSpaceNodeTest::test_is_available_negative();
5240     TestVirtualSpaceNodeTest::test_is_available_overflow();
5241   }
5242 };
5243 
5244 // The following test is placed here instead of a gtest / unittest file
5245 // because the ChunkManager class is only available in this file.
5246 void ChunkManager_test_list_index() {
5247   ChunkManager manager(true);
5248 
5249   // Test previous bug where a query for a humongous class metachunk,
5250   // incorrectly matched the non-class medium metachunk size.
5251   {
5252     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5253 
5254     ChunkIndex index = manager.list_index(MediumChunk);
5255 
5256     assert(index == HumongousIndex,
5257            "Requested size is larger than ClassMediumChunk,"
5258            " so should return HumongousIndex. Got index: %d", (int)index);
5259   }
5260 
5261   // Check the specified sizes as well.
5262   {
5263     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
5264     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
5265   }
5266   {
5267     ChunkIndex index = manager.list_index(ClassSmallChunk);
5268     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
5269   }
5270   {
5271     ChunkIndex index = manager.list_index(ClassMediumChunk);
5272     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
5273   }
5274   {
5275     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
5276     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
5277   }
5278 }
5279 
5280 #endif // !PRODUCT
5281 
5282 #ifdef ASSERT
5283 
5284 // The following test is placed here instead of a gtest / unittest file
5285 // because the ChunkManager class is only available in this file.
5286 class SpaceManagerTest : AllStatic {
5287   friend void SpaceManager_test_adjust_initial_chunk_size();
5288 
5289   static void test_adjust_initial_chunk_size(bool is_class) {
5290     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5291     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5292     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5293 
5294 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5295     do {                                                                         \
5296       size_t v = value;                                                          \
5297       size_t e = expected;                                                       \
5298       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5299              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5300     } while (0)
5301 
5302     // Smallest (specialized)
5303     test_adjust_initial_chunk_size(1,            smallest, is_class);
5304     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5305     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5306 
5307     // Small
5308     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5309     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5310     test_adjust_initial_chunk_size(normal,       normal, is_class);
5311 
5312     // Medium
5313     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5314     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5315     test_adjust_initial_chunk_size(medium,     medium, is_class);
5316 
5317     // Humongous
5318     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5319 
5320 #undef test_adjust_initial_chunk_size
5321   }
5322 
5323   static void test_adjust_initial_chunk_size() {
5324     test_adjust_initial_chunk_size(false);
5325     test_adjust_initial_chunk_size(true);
5326   }
5327 };
5328 
5329 void SpaceManager_test_adjust_initial_chunk_size() {
5330   SpaceManagerTest::test_adjust_initial_chunk_size();
5331 }
5332 
5333 #endif // ASSERT
5334 
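     // The structs and extern functions below expose chunk manager statistics and
     // chunk geometry to test code outside this file (the ChunkManager class is
     // only visible here).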
5335 struct chunkmanager_statistics_t {
5336   int num_specialized_chunks;
5337   int num_small_chunks;
5338   int num_medium_chunks;
5339   int num_humongous_chunks;
5340 };
5341 
5342 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5343   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5344   ChunkManager::ChunkManagerStatistics stat;
5345   chunk_manager->get_statistics(&stat);
5346   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5347   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5348   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5349   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5350 }
5351 
5352 struct chunk_geometry_t {
5353   size_t specialized_chunk_word_size;
5354   size_t small_chunk_word_size;
5355   size_t medium_chunk_word_size;
5356 };
5357 
5358 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5359   if (mdType == Metaspace::NonClassType) {
5360     out->specialized_chunk_word_size = SpecializedChunk;
5361     out->small_chunk_word_size = SmallChunk;
5362     out->medium_chunk_word_size = MediumChunk;
5363   } else {
5364     out->specialized_chunk_word_size = ClassSpecializedChunk;
5365     out->small_chunk_word_size = ClassSmallChunk;
5366     out->medium_chunk_word_size = ClassMediumChunk;
5367   }
5368 }
5369