1 /*
   2  * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/macros.hpp"
  54 
  55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
  56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
  57 
  58 // Helper function that performs a number of consistency checks on a chunk.
  59 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
  60 
  61 // Given a Metachunk, update its in-use information (both in the
  62 // chunk and the occupancy map).
  63 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
  64 
  65 size_t const allocation_from_dictionary_limit = 4 * K;
  66 
  67 MetaWord* last_allocated = 0;
  68 
  69 size_t Metaspace::_compressed_class_space_size;
  70 const MetaspaceTracer* Metaspace::_tracer = NULL;
  71 
  72 DEBUG_ONLY(bool Metaspace::_frozen = false;)
  73 
  74 enum ChunkSizes {    // in words.
  75   ClassSpecializedChunk = 128,
  76   SpecializedChunk = 128,
  77   ClassSmallChunk = 256,
  78   SmallChunk = 512,
  79   ClassMediumChunk = 4 * K,
  80   MediumChunk = 8 * K
  81 };
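     // For illustration: on a 64-bit VM (BytesPerWord == 8) these word sizes correspond to
     // 1 KB (SpecializedChunk/ClassSpecializedChunk), 2 KB (ClassSmallChunk), 4 KB (SmallChunk),
     // 32 KB (ClassMediumChunk) and 64 KB (MediumChunk).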
  82 
  83 // Returns the size of the given chunk type.
  84 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
  85   assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
  86   size_t size = 0;
  87   if (is_class) {
  88     switch(chunktype) {
  89       case SpecializedIndex: size = ClassSpecializedChunk; break;
  90       case SmallIndex: size = ClassSmallChunk; break;
  91       case MediumIndex: size = ClassMediumChunk; break;
  92       default:
  93         ShouldNotReachHere();
  94     }
  95   } else {
  96     switch(chunktype) {
  97       case SpecializedIndex: size = SpecializedChunk; break;
  98       case SmallIndex: size = SmallChunk; break;
  99       case MediumIndex: size = MediumChunk; break;
 100       default:
 101         ShouldNotReachHere();
 102     }
 103   }
 104   return size;
 105 }
 106 
 107 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
 108   if (is_class) {
 109     if (size == ClassSpecializedChunk) {
 110       return SpecializedIndex;
 111     } else if (size == ClassSmallChunk) {
 112       return SmallIndex;
 113     } else if (size == ClassMediumChunk) {
 114       return MediumIndex;
 115     } else if (size > ClassMediumChunk) {
 116       assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
 117       return HumongousIndex;
 118     }
 119   } else {
 120     if (size == SpecializedChunk) {
 121       return SpecializedIndex;
 122     } else if (size == SmallChunk) {
 123       return SmallIndex;
 124     } else if (size == MediumChunk) {
 125       return MediumIndex;
 126     } else if (size > MediumChunk) {
 127       assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
 128       return HumongousIndex;
 129     }
 130   }
 131   ShouldNotReachHere();
 132   return (ChunkIndex)-1;
 133 }
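     // For example, in the non-class case a request for exactly SmallChunk (512) words maps to
     // SmallIndex, while any size above MediumChunk maps to HumongousIndex, which the assert
     // requires to be a multiple of the specialized chunk size.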
 134 
 135 
 136 static ChunkIndex next_chunk_index(ChunkIndex i) {
 137   assert(i < NumberOfInUseLists, "Out of bound");
 138   return (ChunkIndex) (i+1);
 139 }
 140 
 141 static ChunkIndex prev_chunk_index(ChunkIndex i) {
 142   assert(i > ZeroIndex, "Out of bound");
 143   return (ChunkIndex) (i-1);
 144 }
 145 
 146 static const char* scale_unit(size_t scale) {
 147   switch(scale) {
 148     case 1: return "BYTES";
 149     case K: return "KB";
 150     case M: return "MB";
 151     case G: return "GB";
 152     default:
 153       ShouldNotReachHere();
 154       return NULL;
 155   }
 156 }
 157 
 158 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 159 uint MetaspaceGC::_shrink_factor = 0;
 160 bool MetaspaceGC::_should_concurrent_collect = false;
 161 
 162 typedef class FreeList<Metachunk> ChunkList;
 163 
 164 // Manages the global free lists of chunks.
 165 class ChunkManager : public CHeapObj<mtInternal> {
 166   friend class TestVirtualSpaceNodeTest;
 167 
 168   // Free list of chunks of different sizes.
 169   //   SpecializedChunk
 170   //   SmallChunk
 171   //   MediumChunk
 172   ChunkList _free_chunks[NumberOfFreeLists];
 173 
 174   // Whether or not this is the class chunkmanager.
 175   const bool _is_class;
 176 
 177   // Return non-humongous chunk list by its index.
 178   ChunkList* free_chunks(ChunkIndex index);
 179 
 180   // Returns non-humongous chunk list for the given chunk word size.
 181   ChunkList* find_free_chunks_list(size_t word_size);
 182 
 183   //   HumongousChunk
 184   ChunkTreeDictionary _humongous_dictionary;
 185 
 186   // Returns the humongous chunk dictionary.
 187   ChunkTreeDictionary* humongous_dictionary() {
 188     return &_humongous_dictionary;
 189   }
 190 
 191   // Size, in metaspace words, of all chunks managed by this ChunkManager
 192   size_t _free_chunks_total;
 193   // Number of chunks in this ChunkManager
 194   size_t _free_chunks_count;
 195 
 196   // Update counters after a chunk was added or removed.
 197   void account_for_added_chunk(const Metachunk* c);
 198   void account_for_removed_chunk(const Metachunk* c);
 199 
 200   // Debug support
 201 
 202   size_t sum_free_chunks();
 203   size_t sum_free_chunks_count();
 204 
 205   void locked_verify_free_chunks_total();
 206   void slow_locked_verify_free_chunks_total() {
 207     if (VerifyMetaspace) {
 208       locked_verify_free_chunks_total();
 209     }
 210   }
 211   void locked_verify_free_chunks_count();
 212   void slow_locked_verify_free_chunks_count() {
 213     if (VerifyMetaspace) {
 214       locked_verify_free_chunks_count();
 215     }
 216   }
 217   void verify_free_chunks_count();
 218 
 219   // Given a pointer to a chunk, attempts to merge it with neighboring
 220   // free chunks to form a bigger chunk. Returns true if successful.
 221   bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
 222 
 223   // Helper for chunk merging:
 224   //  Given an address range with 1-n chunks which are all supposed to be
 225   //  free and hence currently managed by this ChunkManager, remove them
 226   //  from this ChunkManager and mark them as invalid.
 227   // - This does not correct the occupancy map.
 228   // - This does not adjust the counters in the ChunkManager.
 229   // - This does not adjust the container count in the containing VirtualSpaceNode.
 230   // Returns number of chunks removed.
 231   int remove_chunks_in_area(MetaWord* p, size_t word_size);
 232 
 233   // Helper for chunk splitting: given a target chunk size and a larger free chunk,
 234   // split up the larger chunk into n smaller chunks, at least one of which should be
 235   // the target chunk of target chunk size. The smaller chunks, including the target
 236   // chunk, are returned to the freelist. The pointer to the target chunk is returned.
 237   // Note that this chunk is supposed to be removed from the freelist right away.
 238   Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
 239 
 240  public:
 241 
 242   struct ChunkManagerStatistics {
 243     size_t num_by_type[NumberOfFreeLists];
 244     size_t single_size_by_type[NumberOfFreeLists];
 245     size_t total_size_by_type[NumberOfFreeLists];
 246     size_t num_humongous_chunks;
 247     size_t total_size_humongous_chunks;
 248   };
 249 
 250   void locked_get_statistics(ChunkManagerStatistics* stat) const;
 251   void get_statistics(ChunkManagerStatistics* stat) const;
 252   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 253 
 254 
 255   ChunkManager(bool is_class)
 256       : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
 257     _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
 258     _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
 259     _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
 260   }
 261 
 262   // Allocate (remove) a chunk from the global freelist.
 263   Metachunk* chunk_freelist_allocate(size_t word_size);
 264 
 265   // Map a size to a list index assuming that there are lists
 266   // for special, small, medium, and humongous chunks.
 267   ChunkIndex list_index(size_t size);
 268 
 269   // Map a given index to the chunk size.
 270   size_t size_by_index(ChunkIndex index) const;
 271 
 272   bool is_class() const { return _is_class; }
 273 
 274   // Convenience accessors.
 275   size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
 276   size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
 277   size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
 278 
 279   // Take a chunk from the ChunkManager. The chunk is expected to be in
 280   // the chunk manager (the freelist if non-humongous, the dictionary if
 281   // humongous).
 282   void remove_chunk(Metachunk* chunk);
 283 
 284   // Return a single chunk of type index to the ChunkManager.
 285   void return_single_chunk(ChunkIndex index, Metachunk* chunk);
 286 
 287   // Add the simple linked list of chunks to the freelist of chunks
 288   // of type index.
 289   void return_chunk_list(ChunkIndex index, Metachunk* chunk);
 290 
 291   // Total of the space in the free chunks list
 292   size_t free_chunks_total_words();
 293   size_t free_chunks_total_bytes();
 294 
 295   // Number of chunks in the free chunks list
 296   size_t free_chunks_count();
 297 
 298   // Remove from a list by size.  Selects list based on size of chunk.
 299   Metachunk* free_chunks_get(size_t chunk_word_size);
 300 
 301 #define index_bounds_check(index)                                         \
 302   assert(index == SpecializedIndex ||                                     \
 303          index == SmallIndex ||                                           \
 304          index == MediumIndex ||                                          \
 305          index == HumongousIndex, "Bad index: %d", (int) index)
 306 
 307   size_t num_free_chunks(ChunkIndex index) const {
 308     index_bounds_check(index);
 309 
 310     if (index == HumongousIndex) {
 311       return _humongous_dictionary.total_free_blocks();
 312     }
 313 
 314     ssize_t count = _free_chunks[index].count();
 315     return count == -1 ? 0 : (size_t) count;
 316   }
 317 
 318   size_t size_free_chunks_in_bytes(ChunkIndex index) const {
 319     index_bounds_check(index);
 320 
 321     size_t word_size = 0;
 322     if (index == HumongousIndex) {
 323       word_size = _humongous_dictionary.total_size();
 324     } else {
 325       const size_t size_per_chunk_in_words = _free_chunks[index].size();
 326       word_size = size_per_chunk_in_words * num_free_chunks(index);
 327     }
 328 
 329     return word_size * BytesPerWord;
 330   }
 331 
 332   MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
 333     return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
 334                                          num_free_chunks(SmallIndex),
 335                                          num_free_chunks(MediumIndex),
 336                                          num_free_chunks(HumongousIndex),
 337                                          size_free_chunks_in_bytes(SpecializedIndex),
 338                                          size_free_chunks_in_bytes(SmallIndex),
 339                                          size_free_chunks_in_bytes(MediumIndex),
 340                                          size_free_chunks_in_bytes(HumongousIndex));
 341   }
 342 
 343   // Debug support
 344   void verify();
 345   void slow_verify() {
 346     if (VerifyMetaspace) {
 347       verify();
 348     }
 349   }
 350   void locked_verify();
 351   void slow_locked_verify() {
 352     if (VerifyMetaspace) {
 353       locked_verify();
 354     }
 355   }
 356   void verify_free_chunks_total();
 357 
 358   void locked_print_free_chunks(outputStream* st);
 359   void locked_print_sum_free_chunks(outputStream* st);
 360 
 361   void print_on(outputStream* st) const;
 362 
 363   // Prints composition for both non-class and (if available)
 364   // class chunk manager.
 365   static void print_all_chunkmanagers(outputStream* out, size_t scale = 1);
 366 };
 367 
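     // SmallBlocks manages an array of free lists for very small deallocated metadata blocks
     // (from _small_block_min_size up to, but not including, _small_block_max_size words),
     // i.e. roughly those blocks too small to be handled by the BlockTreeDictionary.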
 368 class SmallBlocks : public CHeapObj<mtClass> {
 369   const static uint _small_block_max_size = sizeof(TreeChunk<Metablock,  FreeList<Metablock> >)/HeapWordSize;
 370   const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
 371 
 372  private:
 373   FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
 374 
 375   FreeList<Metablock>& list_at(size_t word_size) {
 376     assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
 377     return _small_lists[word_size - _small_block_min_size];
 378   }
 379 
 380  public:
 381   SmallBlocks() {
 382     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 383       uint k = i - _small_block_min_size;
 384       _small_lists[k].set_size(i);
 385     }
 386   }
 387 
 388   size_t total_size() const {
 389     size_t result = 0;
 390     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 391       uint k = i - _small_block_min_size;
 392       result = result + _small_lists[k].count() * _small_lists[k].size();
 393     }
 394     return result;
 395   }
 396 
 397   static uint small_block_max_size() { return _small_block_max_size; }
 398   static uint small_block_min_size() { return _small_block_min_size; }
 399 
 400   MetaWord* get_block(size_t word_size) {
 401     if (list_at(word_size).count() > 0) {
 402       MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
 403       return new_block;
 404     } else {
 405       return NULL;
 406     }
 407   }
 408   void return_block(Metablock* free_chunk, size_t word_size) {
 409     list_at(word_size).return_chunk_at_head(free_chunk, false);
 410     assert(list_at(word_size).count() > 0, "Should have a chunk");
 411   }
 412 
 413   void print_on(outputStream* st) const {
 414     st->print_cr("SmallBlocks:");
 415     for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
 416       uint k = i - _small_block_min_size;
 417       st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
 418     }
 419   }
 420 };
 421 
 422 // Used to manage the free list of Metablocks (a block corresponds
 423 // to the allocation of a quantum of metadata).
 424 class BlockFreelist : public CHeapObj<mtClass> {
 425   BlockTreeDictionary* const _dictionary;
 426   SmallBlocks* _small_blocks;
 427 
 428   // Only allocate and split from freelist if the size of the allocation
 429   // is at least 1/4th the size of the available block.
 430   const static int WasteMultiplier = 4;
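       // (For example, a free 64-word block is only used to satisfy requests of at least
       // 16 words; smaller requests are not satisfied from that block, to limit wasted space.)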
 431 
 432   // Accessors
 433   BlockTreeDictionary* dictionary() const { return _dictionary; }
 434   SmallBlocks* small_blocks() {
 435     if (_small_blocks == NULL) {
 436       _small_blocks = new SmallBlocks();
 437     }
 438     return _small_blocks;
 439   }
 440 
 441  public:
 442   BlockFreelist();
 443   ~BlockFreelist();
 444 
 445   // Get a block from, or return a block to, the free list
 446   MetaWord* get_block(size_t word_size);
 447   void return_block(MetaWord* p, size_t word_size);
 448 
 449   size_t total_size() const  {
 450     size_t result = dictionary()->total_size();
 451     if (_small_blocks != NULL) {
 452       result = result + _small_blocks->total_size();
 453     }
 454     return result;
 455   }
 456 
 457   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
 458   void print_on(outputStream* st) const;
 459 };
 460 
 461 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one unsigned constant.
 462 template <typename T> struct all_ones  { static const T value; };
 463 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
 464 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
 465 
 466 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
 467 // keeps information about
 468 // - where a chunk starts
 469 // - whether a chunk is in-use or free
 470 // A bit in this bitmap represents one range of memory in the smallest
 471 // chunk size (SpecializedChunk or ClassSpecializedChunk).
 472 class OccupancyMap : public CHeapObj<mtInternal> {
 473 
 474   // The address range this map covers.
 475   const MetaWord* const _reference_address;
 476   const size_t _word_size;
 477 
 478   // The word size of a specialized chunk, aka the number of words one
 479   // bit in this map represents.
 480   const size_t _smallest_chunk_word_size;
 481 
 482   // map data
 483   // Data are organized in two bit layers:
 484   // The first layer is the chunk-start-map. Here, a bit is set to mark
 485   // the corresponding region as the head of a chunk.
 486   // The second layer is the in-use-map. Here, a set bit indicates that
 487   // the corresponding region belongs to a chunk which is in use.
 488   uint8_t* _map[2];
 489 
 490   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
 491 
 492   // length, in bytes, of bitmap data
 493   size_t _map_size;
 494 
 495   // Returns true if bit at position pos at bit-layer layer is set.
 496   bool get_bit_at_position(unsigned pos, unsigned layer) const {
 497     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 498     const unsigned byteoffset = pos / 8;
 499     assert(byteoffset < _map_size,
 500            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 501     const unsigned mask = 1 << (pos % 8);
 502     return (_map[layer][byteoffset] & mask) > 0;
 503   }
 504 
 505   // Changes bit at position pos at bit-layer layer to value v.
 506   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
 507     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
 508     const unsigned byteoffset = pos / 8;
 509     assert(byteoffset < _map_size,
 510            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 511     const unsigned mask = 1 << (pos % 8);
 512     if (v) {
 513       _map[layer][byteoffset] |= mask;
 514     } else {
 515       _map[layer][byteoffset] &= ~mask;
 516     }
 517   }
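       // (Bit-position arithmetic example: pos == 13 addresses byte 13 / 8 == 1 of a layer's
       // map and bit mask 1 << (13 % 8) == 0x20 within that byte.)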
 518 
 519   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
 520   // pos is 32/64 aligned and num_bits is 32/64.
 521   // This is the typical case when coalescing to medium chunks, whose size is
 522   // 32 or 64 times the specialized chunk size (depending on class or non-class
 523   // case), so they occupy 32 or 64 bits which should be 32/64 bit aligned, because
 524   // chunks are chunk-size aligned.
 525   template <typename T>
 526   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 527     assert(_map_size > 0, "not initialized");
 528     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 529     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 530     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 531     const size_t byteoffset = pos / 8;
 532     assert(byteoffset <= (_map_size - sizeof(T)),
 533            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 534     const T w = *(T*)(_map[layer] + byteoffset);
 535     return w != 0;
 536   }
 537 
 538   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 539   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 540     if (pos % 32 == 0 && num_bits == 32) {
 541       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 542     } else if (pos % 64 == 0 && num_bits == 64) {
 543       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 544     } else {
 545       for (unsigned n = 0; n < num_bits; n ++) {
 546         if (get_bit_at_position(pos + n, layer)) {
 547           return true;
 548         }
 549       }
 550     }
 551     return false;
 552   }
 553 
 554   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 555   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 556     assert(word_size % _smallest_chunk_word_size == 0,
 557         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 558     const unsigned pos = get_bitpos_for_address(p);
 559     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 560     return is_any_bit_set_in_region(pos, num_bits, layer);
 561   }
 562 
 563   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 564   // pos is 32/64 aligned and num_bits is 32/64.
 565   // This is the typical case when coalescing to medium chunks, whose size
 566   // is 32 or 64 times the specialized chunk size (depending on class or non-class
 567   // case), so they occupy 32 or 64 bits which should be 32/64 bit aligned,
 568   // because chunks are chunk-size aligned.
 569   template <typename T>
 570   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 571     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 572            (unsigned)(sizeof(T) * 8), pos);
 573     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 574            num_bits, (unsigned)(sizeof(T) * 8));
 575     const size_t byteoffset = pos / 8;
 576     assert(byteoffset <= (_map_size - sizeof(T)),
 577            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 578     T* const pw = (T*)(_map[layer] + byteoffset);
 579     *pw = v ? all_ones<T>::value : (T) 0;
 580   }
 581 
 582   // Set all bits in a region starting at pos to a value.
 583   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 584     assert(_map_size > 0, "not initialized");
 585     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 586     if (pos % 32 == 0 && num_bits == 32) {
 587       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 588     } else if (pos % 64 == 0 && num_bits == 64) {
 589       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 590     } else {
 591       for (unsigned n = 0; n < num_bits; n ++) {
 592         set_bit_at_position(pos + n, layer, v);
 593       }
 594     }
 595   }
 596 
 597   // Helper: sets all bits in a region [p, p+word_size).
 598   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 599     assert(word_size % _smallest_chunk_word_size == 0,
 600         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 601     const unsigned pos = get_bitpos_for_address(p);
 602     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 603     set_bits_of_region(pos, num_bits, layer, v);
 604   }
 605 
 606   // Helper: given an address, return the bit position representing that address.
 607   unsigned get_bitpos_for_address(const MetaWord* p) const {
 608     assert(_reference_address != NULL, "not initialized");
 609     assert(p >= _reference_address && p < _reference_address + _word_size,
 610            "Address %p out of range for occupancy map [%p..%p).",
 611             p, _reference_address, _reference_address + _word_size);
 612     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 613            "Address not aligned (%p).", p);
 614     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 615     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 616     return (unsigned) d;
 617   }
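       // (For example, with a smallest chunk word size of 128, an address 256 words past
       // _reference_address maps to bit position 2.)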
 618 
 619  public:
 620 
 621   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
 622     _reference_address(reference_address), _word_size(word_size),
 623     _smallest_chunk_word_size(smallest_chunk_word_size) {
 624     assert(reference_address != NULL, "invalid reference address");
 625     assert(is_aligned(reference_address, smallest_chunk_word_size),
 626            "Reference address not aligned to smallest chunk size.");
 627     assert(is_aligned(word_size, smallest_chunk_word_size),
 628            "Word_size shall be a multiple of the smallest chunk size.");
 629     // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
 630     size_t num_bits = word_size / smallest_chunk_word_size;
 631     _map_size = (num_bits + 7) / 8;
 632     assert(_map_size * 8 >= num_bits, "sanity");
 633     _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
 634     _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
 635     assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
 636     memset(_map[1], 0, _map_size);
 637     memset(_map[0], 0, _map_size);
 638     // Sanity test: the first and last possible chunk start addresses in the
 639     // covered range shall map to the first and last bit in the bitmap, respectively.
 640     assert(get_bitpos_for_address(reference_address) == 0,
 641       "First chunk address in range must map to fist bit in bitmap.");
 642     assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
 643       "Last chunk address in range must map to last bit in bitmap.");
 644   }
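       // (Sizing example: covering 256 K words with a 128-word smallest chunk size yields
       // 2048 bits, i.e. 256 bytes per bitmap layer.)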
 645 
 646   ~OccupancyMap() {
 647     os::free(_map[0]);
 648     os::free(_map[1]);
 649   }
 650 
 651   // Returns true if a chunk starts at address p.
 652   bool chunk_starts_at_address(MetaWord* p) const {
 653     const unsigned pos = get_bitpos_for_address(p);
 654     return get_bit_at_position(pos, layer_chunk_start_map);
 655   }
 656 
 657   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 658     const unsigned pos = get_bitpos_for_address(p);
 659     set_bit_at_position(pos, layer_chunk_start_map, v);
 660   }
 661 
 662   // Removes all chunk-start-bits inside a region, typically as a
 663   // result of a chunk merge.
 664   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 665     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 666   }
 667 
 668   // Returns true if there are live (in use) chunks in the region limited
 669   // by [p, p+word_size).
 670   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 671     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 672   }
 673 
 674   // Marks the region starting at p with the size word_size as in use
 675   // or free, depending on v.
 676   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 677     set_bits_of_region(p, word_size, layer_in_use_map, v);
 678   }
 679 
 680 #ifdef ASSERT
 681   // Verify occupancy map for the address range [from, to).
 682   // We need to tell it the address range, because the memory the
 683   // occupancy map is covering may not be fully committed yet.
 684   void verify(MetaWord* from, MetaWord* to) {
 685     Metachunk* chunk = NULL;
 686     int nth_bit_for_chunk = 0;
 687     MetaWord* chunk_end = NULL;
 688     for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
 689       const unsigned pos = get_bitpos_for_address(p);
 690       // Check the chunk-starts-info:
 691       if (get_bit_at_position(pos, layer_chunk_start_map)) {
 692         // Chunk start marked in bitmap.
 693         chunk = (Metachunk*) p;
 694         if (chunk_end != NULL) {
 695           assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
 696                  "the next chunk to start at %p).", p, chunk_end);
 697         }
 698         assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
 699         if (chunk->get_chunk_type() != HumongousIndex) {
 700           guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
 701         }
 702         chunk_end = p + chunk->word_size();
 703         nth_bit_for_chunk = 0;
 704         assert(chunk_end <= to, "Chunk end overlaps test address range.");
 705       } else {
 706         // No chunk start marked in bitmap.
 707         assert(chunk != NULL, "Chunk should start at start of address range.");
 708         assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
 709         nth_bit_for_chunk ++;
 710       }
 711       // Check the in-use-info:
 712       const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
 713       if (in_use_bit) {
 714         assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
 715                chunk, nth_bit_for_chunk);
 716       } else {
 717         assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
 718                chunk, nth_bit_for_chunk);
 719       }
 720     }
 721   }
 722 
 723   // Verify that a given chunk is correctly accounted for in the bitmap.
 724   void verify_for_chunk(Metachunk* chunk) {
 725     assert(chunk_starts_at_address((MetaWord*) chunk),
 726            "No chunk start marked in map for chunk %p.", chunk);
 727     // For chunks larger than the minimal chunk size, no other chunk
 728     // must start in its area.
 729     if (chunk->word_size() > _smallest_chunk_word_size) {
 730       assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
 731                                        chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
 732              "No chunk must start within another chunk.");
 733     }
 734     if (!chunk->is_tagged_free()) {
 735       assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 736              "Chunk %p is in use but marked as free in map (%d %d).",
 737              chunk, chunk->get_chunk_type(), chunk->get_origin());
 738     } else {
 739       assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
 740              "Chunk %p is free but marked as in-use in map (%d %d).",
 741              chunk, chunk->get_chunk_type(), chunk->get_origin());
 742     }
 743   }
 744 
 745 #endif // ASSERT
 746 
 747 };
 748 
 749 // A VirtualSpaceList node.
 750 class VirtualSpaceNode : public CHeapObj<mtClass> {
 751   friend class VirtualSpaceList;
 752 
 753   // Link to next VirtualSpaceNode
 754   VirtualSpaceNode* _next;
 755 
 756   // Whether this node is contained in class or metaspace.
 757   const bool _is_class;
 758 
 759   // total in the VirtualSpace
 760   MemRegion _reserved;
 761   ReservedSpace _rs;
 762   VirtualSpace _virtual_space;
 763   MetaWord* _top;
 764   // count of chunks contained in this VirtualSpace
 765   uintx _container_count;
 766 
 767   OccupancyMap* _occupancy_map;
 768 
 769   // Convenience functions to access the _virtual_space
 770   char* low()  const { return virtual_space()->low(); }
 771   char* high() const { return virtual_space()->high(); }
 772 
 773   // The first Metachunk will be allocated at the bottom of the
 774   // VirtualSpace
 775   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 776 
 777   // Committed but unused space in the virtual space
 778   size_t free_words_in_vs() const;
 779 
 780   // True if this node belongs to class metaspace.
 781   bool is_class() const { return _is_class; }
 782 
 783   // Helper function for take_from_committed: allocate padding chunks
 784   // until top is at the given address.
 785   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
 786 
 787  public:
 788 
 789   VirtualSpaceNode(bool is_class, size_t byte_size);
 790   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
 791     _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
 792   ~VirtualSpaceNode();
 793 
 794   // Convenience functions for logical bottom and end
 795   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 796   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 797 
 798   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
 799   OccupancyMap* occupancy_map() { return _occupancy_map; }
 800 
 801   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 802 
 803   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
 804   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 805 
 806   bool is_pre_committed() const { return _virtual_space.special(); }
 807 
 808   // address of next available space in _virtual_space;
 809   // Accessors
 810   VirtualSpaceNode* next() { return _next; }
 811   void set_next(VirtualSpaceNode* v) { _next = v; }
 812 
 813   void set_reserved(MemRegion const v) { _reserved = v; }
 814   void set_top(MetaWord* v) { _top = v; }
 815 
 816   // Accessors
 817   MemRegion* reserved() { return &_reserved; }
 818   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 819 
 820   // Returns true if "word_size" is available in the VirtualSpace
 821   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 822 
 823   MetaWord* top() const { return _top; }
 824   void inc_top(size_t word_size) { _top += word_size; }
 825 
 826   uintx container_count() { return _container_count; }
 827   void inc_container_count();
 828   void dec_container_count();
 829 #ifdef ASSERT
 830   uintx container_count_slow();
 831   void verify_container_count();
 832 #endif
 833 
 834   // used and capacity in this single entry in the list
 835   size_t used_words_in_vs() const;
 836   size_t capacity_words_in_vs() const;
 837 
 838   bool initialize();
 839 
 840   // get space from the virtual space
 841   Metachunk* take_from_committed(size_t chunk_word_size);
 842 
 843   // Allocate a chunk from the virtual space and return it.
 844   Metachunk* get_chunk_vs(size_t chunk_word_size);
 845 
 846   // Expands/shrinks the committed space in a virtual space.  Delegates
 847   // to VirtualSpace
 848   bool expand_by(size_t min_words, size_t preferred_words);
 849 
 850   // In preparation for deleting this node, remove all the chunks
 851   // in the node from any freelist.
 852   void purge(ChunkManager* chunk_manager);
 853 
 854   // If an allocation doesn't fit in the current node a new node is created.
 855   // Allocate chunks out of the remaining committed space in this node
 856   // to avoid wasting that memory.
 857   // This always adds up because all the chunk sizes are multiples of
 858   // the smallest chunk size.
 859   void retire(ChunkManager* chunk_manager);
 860 
 861 
 862   void print_on(outputStream* st) const;
 863   void print_map(outputStream* st, bool is_class) const;
 864 
 865   // Debug support
 866   DEBUG_ONLY(void mangle();)
 867   // Verify counters, all chunks in this list node and the occupancy map.
 868   DEBUG_ONLY(void verify();)
 869   // Verify that all free chunks in this node are ideally merged
 870   // (there should not be multiple small chunks where a large chunk could exist.)
 871   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 872 
 873 };
 874 
 875 #define assert_is_aligned(value, alignment)                  \
 876   assert(is_aligned((value), (alignment)),                   \
 877          SIZE_FORMAT_HEX " is not aligned to "               \
 878          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 879 
 880 // Decide if large pages should be committed when the memory is reserved.
 881 static bool should_commit_large_pages_when_reserving(size_t bytes) {
 882   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
 883     size_t words = bytes / BytesPerWord;
 884     bool is_class = false; // We never reserve large pages for the class space.
 885     if (MetaspaceGC::can_expand(words, is_class) &&
 886         MetaspaceGC::allowed_expansion() >= words) {
 887       return true;
 888     }
 889   }
 890 
 891   return false;
 892 }
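     // Note: large pages are only committed up front here when the OS cannot commit
     // large-paged memory piecemeal (!os::can_commit_large_page_memory()) and the
     // expansion would be permitted by MetaspaceGC anyway.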
 893 
 894 // bytes is the size of the associated virtual space.
 895 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
 896   _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
 897   assert_is_aligned(bytes, Metaspace::reserve_alignment());
 898   bool large_pages = should_commit_large_pages_when_reserving(bytes);
 899   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 900 
 901   if (_rs.is_reserved()) {
 902     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 903     assert(_rs.size() != 0, "Catch if we get a 0 size");
 904     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
 905     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 906 
 907     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 908   }
 909 }
 910 
 911 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
 912   DEBUG_ONLY(this->verify();)
 913   Metachunk* chunk = first_chunk();
 914   Metachunk* invalid_chunk = (Metachunk*) top();
 915   while (chunk < invalid_chunk ) {
 916     assert(chunk->is_tagged_free(), "Should be tagged free");
 917     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 918     chunk_manager->remove_chunk(chunk);
 919     chunk->remove_sentinel();
 920     assert(chunk->next() == NULL &&
 921            chunk->prev() == NULL,
 922            "Was not removed from its list");
 923     chunk = (Metachunk*) next;
 924   }
 925 }
 926 
 927 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 928 
 929   if (bottom() == top()) {
 930     return;
 931   }
 932 
 933   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 934   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 935   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 936 
 937   int line_len = 100;
 938   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 939   line_len = (int)(section_len / spec_chunk_size);
 940 
 941   static const int NUM_LINES = 4;
 942 
 943   char* lines[NUM_LINES];
 944   for (int i = 0; i < NUM_LINES; i ++) {
 945     lines[i] = (char*)os::malloc(line_len, mtInternal);
 946   }
 947   int pos = 0;
 948   const MetaWord* p = bottom();
 949   const Metachunk* chunk = (const Metachunk*)p;
 950   const MetaWord* chunk_end = p + chunk->word_size();
 951   while (p < top()) {
 952     if (pos == line_len) {
 953       pos = 0;
 954       for (int i = 0; i < NUM_LINES; i ++) {
 955         st->fill_to(22);
 956         st->print_raw(lines[i], line_len);
 957         st->cr();
 958       }
 959     }
 960     if (pos == 0) {
 961       st->print(PTR_FORMAT ":", p2i(p));
 962     }
 963     if (p == chunk_end) {
 964       chunk = (Metachunk*)p;
 965       chunk_end = p + chunk->word_size();
 966     }
 967     // Line 1: chunk starting points (a dot if that area is a chunk start).
 968     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 969 
 970     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 971     // chunk is in use.
 972     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 973     if (chunk->word_size() == spec_chunk_size) {
 974       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 975     } else if (chunk->word_size() == small_chunk_size) {
 976       lines[1][pos] = chunk_is_free ? 's' : 'S';
 977     } else if (chunk->word_size() == med_chunk_size) {
 978       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 979     } else if (chunk->word_size() > med_chunk_size) {
 980       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 981     } else {
 982       ShouldNotReachHere();
 983     }
 984 
 985     // Line 3: chunk origin
 986     const ChunkOrigin origin = chunk->get_origin();
 987     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 988 
 989     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 990     //         but were never used.
 991     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 992 
 993     p += spec_chunk_size;
 994     pos ++;
 995   }
 996   if (pos > 0) {
 997     for (int i = 0; i < NUM_LINES; i ++) {
 998       st->fill_to(22);
 999       st->print_raw(lines[i], line_len);
1000       st->cr();
1001     }
1002   }
1003   for (int i = 0; i < NUM_LINES; i ++) {
1004     os::free(lines[i]);
1005   }
1006 }
1007 
1008 
1009 #ifdef ASSERT
1010 uintx VirtualSpaceNode::container_count_slow() {
1011   uintx count = 0;
1012   Metachunk* chunk = first_chunk();
1013   Metachunk* invalid_chunk = (Metachunk*) top();
1014   while (chunk < invalid_chunk ) {
1015     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1016     do_verify_chunk(chunk);
1017     // Don't count the chunks on the free lists.  Those are
1018     // still part of the VirtualSpaceNode but not currently
1019     // counted.
1020     if (!chunk->is_tagged_free()) {
1021       count++;
1022     }
1023     chunk = (Metachunk*) next;
1024   }
1025   return count;
1026 }
1027 #endif
1028 
1029 #ifdef ASSERT
1030 // Verify counters, all chunks in this list node and the occupancy map.
1031 void VirtualSpaceNode::verify() {
1032   uintx num_in_use_chunks = 0;
1033   Metachunk* chunk = first_chunk();
1034   Metachunk* invalid_chunk = (Metachunk*) top();
1035 
1036   // Iterate the chunks in this node and verify each chunk.
1037   while (chunk < invalid_chunk ) {
1038     DEBUG_ONLY(do_verify_chunk(chunk);)
1039     if (!chunk->is_tagged_free()) {
1040       num_in_use_chunks ++;
1041     }
1042     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1043     chunk = (Metachunk*) next;
1044   }
1045   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
1046          ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
1047   // Also verify the occupancy map.
1048   occupancy_map()->verify(this->bottom(), this->top());
1049 }
1050 #endif // ASSERT
1051 
1052 #ifdef ASSERT
1053 // Verify that all free chunks in this node are ideally merged
1054 // (there should not be multiple small chunks where a large chunk could exist.)
1055 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
1056   Metachunk* chunk = first_chunk();
1057   Metachunk* invalid_chunk = (Metachunk*) top();
1058   // Shorthands.
1059   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1060   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1061   int num_free_chunks_since_last_med_boundary = -1;
1062   int num_free_chunks_since_last_small_boundary = -1;
1063   while (chunk < invalid_chunk ) {
1064     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
1065     // Reset the counter when encountering a non-free chunk.
1066     if (chunk->get_chunk_type() != HumongousIndex) {
1067       if (chunk->is_tagged_free()) {
1068         // Count successive free, non-humongous chunks.
1069         if (is_aligned(chunk, size_small)) {
1070           assert(num_free_chunks_since_last_small_boundary <= 1,
1071                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1072           num_free_chunks_since_last_small_boundary = 0;
1073         } else if (num_free_chunks_since_last_small_boundary != -1) {
1074           num_free_chunks_since_last_small_boundary ++;
1075         }
1076         if (is_aligned(chunk, size_med)) {
1077           assert(num_free_chunks_since_last_med_boundary <= 1,
1078                  "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1079           num_free_chunks_since_last_med_boundary = 0;
1080         } else if (num_free_chunks_since_last_med_boundary != -1) {
1081           num_free_chunks_since_last_med_boundary ++;
1082         }
1083       } else {
1084         // Encountering a non-free chunk, reset counters.
1085         num_free_chunks_since_last_med_boundary = -1;
1086         num_free_chunks_since_last_small_boundary = -1;
1087       }
1088     } else {
1089       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1090       num_free_chunks_since_last_med_boundary = -1;
1091       num_free_chunks_since_last_small_boundary = -1;
1092     }
1093 
1094     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1095     chunk = (Metachunk*) next;
1096   }
1097 }
1098 #endif // ASSERT
1099 
1100 // List of VirtualSpaces for metadata allocation.
1101 class VirtualSpaceList : public CHeapObj<mtClass> {
1102   friend class VirtualSpaceNode;
1103 
1104   enum VirtualSpaceSizes {
1105     VirtualSpaceSize = 256 * K
1106   };
1107 
1108   // Head of the list
1109   VirtualSpaceNode* _virtual_space_list;
1110   // virtual space currently being used for allocations
1111   VirtualSpaceNode* _current_virtual_space;
1112 
1113   // Is this VirtualSpaceList used for the compressed class space
1114   bool _is_class;
1115 
1116   // Sum of reserved and committed memory in the virtual spaces
1117   size_t _reserved_words;
1118   size_t _committed_words;
1119 
1120   // Number of virtual spaces
1121   size_t _virtual_space_count;
1122 
1123   ~VirtualSpaceList();
1124 
1125   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
1126 
1127   void set_virtual_space_list(VirtualSpaceNode* v) {
1128     _virtual_space_list = v;
1129   }
1130   void set_current_virtual_space(VirtualSpaceNode* v) {
1131     _current_virtual_space = v;
1132   }
1133 
1134   void link_vs(VirtualSpaceNode* new_entry);
1135 
1136   // Get another virtual space and add it to the list.  This
1137   // is typically prompted by a failed attempt to allocate a chunk
1138   // and is typically followed by the allocation of a chunk.
1139   bool create_new_virtual_space(size_t vs_word_size);
1140 
1141   // Chunk up the unused committed space in the current
1142   // virtual space and add the chunks to the free list.
1143   void retire_current_virtual_space();
1144 
1145  public:
1146   VirtualSpaceList(size_t word_size);
1147   VirtualSpaceList(ReservedSpace rs);
1148 
1149   size_t free_bytes();
1150 
1151   Metachunk* get_new_chunk(size_t chunk_word_size,
1152                            size_t suggested_commit_granularity);
1153 
1154   bool expand_node_by(VirtualSpaceNode* node,
1155                       size_t min_words,
1156                       size_t preferred_words);
1157 
1158   bool expand_by(size_t min_words,
1159                  size_t preferred_words);
1160 
1161   VirtualSpaceNode* current_virtual_space() {
1162     return _current_virtual_space;
1163   }
1164 
1165   bool is_class() const { return _is_class; }
1166 
1167   bool initialization_succeeded() { return _virtual_space_list != NULL; }
1168 
1169   size_t reserved_words()  { return _reserved_words; }
1170   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
1171   size_t committed_words() { return _committed_words; }
1172   size_t committed_bytes() { return committed_words() * BytesPerWord; }
1173 
1174   void inc_reserved_words(size_t v);
1175   void dec_reserved_words(size_t v);
1176   void inc_committed_words(size_t v);
1177   void dec_committed_words(size_t v);
1178   void inc_virtual_space_count();
1179   void dec_virtual_space_count();
1180 
1181   bool contains(const void* ptr);
1182 
1183   // Unlink empty VirtualSpaceNodes and free them.
1184   void purge(ChunkManager* chunk_manager);
1185 
1186   void print_on(outputStream* st) const;
1187   void print_map(outputStream* st) const;
1188 
1189   class VirtualSpaceListIterator : public StackObj {
1190     VirtualSpaceNode* _virtual_spaces;
1191    public:
1192     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
1193       _virtual_spaces(virtual_spaces) {}
1194 
1195     bool repeat() {
1196       return _virtual_spaces != NULL;
1197     }
1198 
1199     VirtualSpaceNode* get_next() {
1200       VirtualSpaceNode* result = _virtual_spaces;
1201       if (_virtual_spaces != NULL) {
1202         _virtual_spaces = _virtual_spaces->next();
1203       }
1204       return result;
1205     }
1206   };
1207 };
1208 
1209 class Metadebug : AllStatic {
1210   // Debugging support for Metaspaces
1211   static int _allocation_fail_alot_count;
1212 
1213  public:
1214 
1215   static void init_allocation_fail_alot_count();
1216 #ifdef ASSERT
1217   static bool test_metadata_failure();
1218 #endif
1219 
1220   static void test_anon_delegating_metaspace_sizing(ClassLoaderMetaspace* msp) PRODUCT_RETURN;
1221 };
1222 
1223 int Metadebug::_allocation_fail_alot_count = 0;
1224 
1225 //  SpaceManager - used by Metaspace to handle allocations
1226 class SpaceManager : public CHeapObj<mtClass> {
1227   friend class ClassLoaderMetaspace;
1228   friend class Metadebug;
1229 
1230  private:
1231 
1232   // protects allocations
1233   Mutex* const _lock;
1234 
1235   // Type of metadata allocated.
1236   const Metaspace::MetadataType   _mdtype;
1237 
1238   // Type of metaspace
1239   const Metaspace::MetaspaceType  _space_type;
1240 
1241   // List of chunks in use by this SpaceManager.  Allocations
1242   // are done from the current chunk.  The list is used for deallocating
1243   // chunks when the SpaceManager is freed.
1244   Metachunk* _chunks_in_use[NumberOfInUseLists];
1245   Metachunk* _current_chunk;
1246 
1247   // Maximum number of small chunks to allocate to a SpaceManager
1248   static uint const _small_chunk_limit;
1249 
1250   // Maximum number of specialized chunks to allocate for anonymous and delegating
1251   // metadata space to a SpaceManager
1252   static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
1253 
1254   // Sum of all space in allocated chunks
1255   size_t _allocated_blocks_words;
1256 
1257   // Sum of all allocated chunks
1258   size_t _allocated_chunks_words;
1259   size_t _allocated_chunks_count;
1260 
1261   // Free lists of blocks are per SpaceManager since they
1262   // are assumed to be in chunks in use by the SpaceManager
1263   // and all chunks in use by a SpaceManager are freed when
1264   // the class loader using the SpaceManager is collected.
1265   BlockFreelist* _block_freelists;
1266 
1267   // protects virtualspace and chunk expansions
1268   static const char*  _expand_lock_name;
1269   static const int    _expand_lock_rank;
1270   static Mutex* const _expand_lock;
1271 
1272  private:
1273   // Accessors
1274   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
1275   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
1276     _chunks_in_use[index] = v;
1277   }
1278 
1279   BlockFreelist* block_freelists() const { return _block_freelists; }
1280 
1281   Metaspace::MetadataType mdtype() { return _mdtype; }
1282 
1283   VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
1284   ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
1285 
1286   Metachunk* current_chunk() const { return _current_chunk; }
1287   void set_current_chunk(Metachunk* v) {
1288     _current_chunk = v;
1289   }
1290 
1291   Metachunk* find_current_chunk(size_t word_size);
1292 
1293   // Add chunk to the list of chunks in use
1294   void add_chunk(Metachunk* v, bool make_current);
1295   void retire_current_chunk();
1296 
1297   Mutex* lock() const { return _lock; }
1298 
1299  protected:
1300   void initialize();
1301 
1302  public:
1303   SpaceManager(Metaspace::MetadataType mdtype,
1304                Metaspace::MetaspaceType space_type,
1305                Mutex* lock);
1306   ~SpaceManager();
1307 
1308   enum ChunkMultiples {
1309     MediumChunkMultiple = 4
1310   };
1311 
1312   static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
1313   static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
1314   static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }
1315 
1316   static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }
1317 
1318   // Accessors
1319   bool is_class() const { return _mdtype == Metaspace::ClassType; }
1320   Metaspace::MetaspaceType metaspace_type() const { return _space_type; }
1321 
1322   size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
1323   size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
1324   size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }
1325 
1326   size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }
1327 
1328   size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }
1329 
1330   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
1331   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
1332   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
1333   size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
1334   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
1335 
1336   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
1337 
1338   static Mutex* expand_lock() { return _expand_lock; }
1339 
1340   // Increment the per Metaspace and global running sums for Metachunks
1341   // by the given size.  This is used when a Metachunk is added to
1342   // the in-use list.
1343   void inc_size_metrics(size_t words);
1344   // Increment the per Metaspace and global running sums for Metablocks by the given
1345   // size.  This is used when a Metablock is allocated.
1346   void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
1350   void dec_total_from_size_metrics();
1351 
1352   // Adjust the initial chunk size to match one of the fixed chunk list sizes,
1353   // or return the unadjusted size if the requested size is humongous.
1354   static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1355   size_t adjust_initial_chunk_size(size_t requested) const;
1356 
  // Get the initial chunk size for this metaspace type.
1358   size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1359 
1360   size_t sum_capacity_in_chunks_in_use() const;
1361   size_t sum_used_in_chunks_in_use() const;
1362   size_t sum_free_in_chunks_in_use() const;
1363   size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1365 
1366   size_t sum_count_in_chunks_in_use();
1367   size_t sum_count_in_chunks_in_use(ChunkIndex i);
1368 
1369   Metachunk* get_new_chunk(size_t chunk_word_size);
1370 
1371   // Block allocation and deallocation.
1372   // Allocates a block from the current chunk
1373   MetaWord* allocate(size_t word_size);
1374 
1375   // Helper for allocations
1376   MetaWord* allocate_work(size_t word_size);
1377 
1378   // Returns a block to the per manager freelist
1379   void deallocate(MetaWord* p, size_t word_size);
1380 
  // Based on the allocation size and a minimum chunk size,
  // returns the chunk size to use (for expanding space for chunk allocation).
1383   size_t calc_chunk_size(size_t allocation_word_size);
1384 
1385   // Called when an allocation from the current chunk fails.
1386   // Gets a new chunk (may require getting a new virtual space),
1387   // and allocates from that chunk.
1388   MetaWord* grow_and_allocate(size_t word_size);
1389 
  // Report memory usage to MemoryService.
1391   void track_metaspace_memory_usage();
1392 
1393   // debugging support.
1394 
1395   void dump(outputStream* const out) const;
1396   void print_on(outputStream* st) const;
1397   void locked_print_chunks_in_use_on(outputStream* st) const;
1398 
1399   void verify();
1400   void verify_chunk_size(Metachunk* chunk);
1401 #ifdef ASSERT
1402   void verify_allocated_blocks_words();
1403 #endif
1404 
  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
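  // Illustrative example (numbers assume a 64-bit VM, i.e. BytesPerWord == 8,
  // and an 8-byte Metachunk object alignment; exact values are platform
  // dependent): a request for 1 word becomes 8 bytes, is raised to
  // sizeof(Metablock) so a freed block can later carry a Metablock header,
  // then aligned up and converted back to words; that is where the 3-word
  // minimum mentioned above comes from.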
1407   size_t get_allocation_word_size(size_t word_size) {
1408     size_t byte_size = word_size * BytesPerWord;
1409 
1410     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1411     raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1412 
1413     size_t raw_word_size = raw_bytes_size / BytesPerWord;
1414     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1415 
1416     return raw_word_size;
1417   }
1418 };
1419 
1420 uint const SpaceManager::_small_chunk_limit = 4;
1421 uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
1422 
1423 const char* SpaceManager::_expand_lock_name =
1424   "SpaceManager chunk allocation lock";
1425 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
1426 Mutex* const SpaceManager::_expand_lock =
1427   new Mutex(SpaceManager::_expand_lock_rank,
1428             SpaceManager::_expand_lock_name,
1429             Mutex::_allow_vm_block_flag,
1430             Monitor::_safepoint_check_never);
1431 
1432 void VirtualSpaceNode::inc_container_count() {
1433   assert_lock_strong(SpaceManager::expand_lock());
1434   _container_count++;
1435 }
1436 
1437 void VirtualSpaceNode::dec_container_count() {
1438   assert_lock_strong(SpaceManager::expand_lock());
1439   _container_count--;
1440 }
1441 
1442 #ifdef ASSERT
1443 void VirtualSpaceNode::verify_container_count() {
1444   assert(_container_count == container_count_slow(),
1445          "Inconsistency in container_count _container_count " UINTX_FORMAT
1446          " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
1447 }
1448 #endif
1449 
1450 // BlockFreelist methods
1451 
1452 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
1453 
1454 BlockFreelist::~BlockFreelist() {
1455   delete _dictionary;
1456   if (_small_blocks != NULL) {
1457     delete _small_blocks;
1458   }
1459 }
1460 
1461 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
1462   assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
1463 
1464   Metablock* free_chunk = ::new (p) Metablock(word_size);
1465   if (word_size < SmallBlocks::small_block_max_size()) {
1466     small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
1470   log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
1471             SIZE_FORMAT, p2i(free_chunk), word_size);
1472 }
1473 
1474 MetaWord* BlockFreelist::get_block(size_t word_size) {
1475   assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
1476 
1477   // Try small_blocks first.
1478   if (word_size < SmallBlocks::small_block_max_size()) {
1479     // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
1480     // this space manager.
1481     MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
1482     if (new_block != NULL) {
1483       log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1484               p2i(new_block), word_size);
1485       return new_block;
1486     }
1487   }
1488 
1489   if (word_size < BlockFreelist::min_dictionary_size()) {
1490     // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
1491     return NULL;
1492   }
1493 
1494   Metablock* free_block = dictionary()->get_chunk(word_size);
1495   if (free_block == NULL) {
1496     return NULL;
1497   }
1498 
1499   const size_t block_size = free_block->size();
1500   if (block_size > WasteMultiplier * word_size) {
1501     return_block((MetaWord*)free_block, block_size);
1502     return NULL;
1503   }
1504 
1505   MetaWord* new_block = (MetaWord*)free_block;
1506   assert(block_size >= word_size, "Incorrect size of block from freelist");
1507   const size_t unused = block_size - word_size;
1508   if (unused >= SmallBlocks::small_block_min_size()) {
1509     return_block(new_block + word_size, unused);
1510   }
1511 
1512   log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1513             p2i(new_block), word_size);
1514   return new_block;
1515 }
1516 
1517 void BlockFreelist::print_on(outputStream* st) const {
1518   dictionary()->print_free_lists(st);
1519   if (_small_blocks != NULL) {
1520     _small_blocks->print_on(st);
1521   }
1522 }
1523 
1524 // VirtualSpaceNode methods
1525 
1526 VirtualSpaceNode::~VirtualSpaceNode() {
1527   _rs.release();
1528   if (_occupancy_map != NULL) {
1529     delete _occupancy_map;
1530   }
1531 #ifdef ASSERT
1532   size_t word_size = sizeof(*this) / BytesPerWord;
1533   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1534 #endif
1535 }
1536 
1537 size_t VirtualSpaceNode::used_words_in_vs() const {
1538   return pointer_delta(top(), bottom(), sizeof(MetaWord));
1539 }
1540 
1541 // Space committed in the VirtualSpace
1542 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1543   return pointer_delta(end(), bottom(), sizeof(MetaWord));
1544 }
1545 
1546 size_t VirtualSpaceNode::free_words_in_vs() const {
1547   return pointer_delta(end(), top(), sizeof(MetaWord));
1548 }
1549 
1550 // Given an address larger than top(), allocate padding chunks until top is at the given address.
1551 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
1552 
1553   assert(target_top > top(), "Sanity");
1554 
1555   // Padding chunks are added to the freelist.
1556   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1557 
1558   // shorthands
1559   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1560   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1561   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1562 
1563   while (top() < target_top) {
1564 
    // We could make this code more generic, but right now we only deal with two possible chunk sizes
    // for padding chunks, so it is not worth it.
1567     size_t padding_chunk_word_size = small_word_size;
1568     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
1569       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1570       padding_chunk_word_size = spec_word_size;
1571     }
1572     MetaWord* here = top();
1573     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1574     inc_top(padding_chunk_word_size);
1575 
1576     // Create new padding chunk.
1577     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1578     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1579 
1580     Metachunk* const padding_chunk =
1581       ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1582     assert(padding_chunk == (Metachunk*)here, "Sanity");
1583     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1584     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
1585                                        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1586                                        (is_class() ? "class space " : "metaspace"),
1587                                        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1588 
1589     // Mark chunk start in occupancy map.
1590     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1591 
    // Chunks are born as in-use (see Metachunk ctor). So, before returning
1593     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1594     // will assert that).
1595     do_update_in_use_info_for_chunk(padding_chunk, true);
1596 
1597     // Return Chunk to freelist.
1598     inc_container_count();
1599     chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
    // Please note: ChunkManager::return_single_chunk() may already have
    // merged the padding chunk with neighboring chunks, in which case it
    // no longer exists as a separate chunk. Do not reference the padding
    // chunk beyond this point.
1604   }
1605 
1606   assert(top() == target_top, "Sanity");
1607 
1608 } // allocate_padding_chunks_until_top_is_at()
1609 
1610 // Allocates the chunk from the virtual space only.
1611 // This interface is also used internally for debugging.  Not all
1612 // chunks removed here are necessarily used for allocation.
1613 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1614   // Non-humongous chunks are to be allocated aligned to their chunk
1615   // size. So, start addresses of medium chunks are aligned to medium
1616   // chunk size, those of small chunks to small chunk size and so
1617   // forth. This facilitates merging of free chunks and reduces
1618   // fragmentation. Chunk sizes are spec < small < medium, with each
1619   // larger chunk size being a multiple of the next smaller chunk
1620   // size.
  // Because of this alignment, we may need to create a number of padding
  // chunks. These chunks are created and added to the freelist.
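  // Illustrative example (sizes are hypothetical): if a medium chunk is
  // requested while top() is only aligned to the specialized chunk size,
  // top() is first advanced to the next medium-chunk boundary by carving out
  // specialized- and small-sized padding chunks (see
  // allocate_padding_chunks_until_top_is_at() above), all of which go to the
  // freelist; only then is the medium chunk itself allocated at the aligned
  // address.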
1623 
1624   // The chunk manager to which we will give our padding chunks.
1625   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1626 
1627   // shorthands
1628   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1629   const size_t small_word_size = chunk_manager->small_chunk_word_size();
1630   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1631 
1632   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1633          chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1634 
1635   // Chunk alignment (in bytes) == chunk size unless humongous.
1636   // Humongous chunks are aligned to the smallest chunk size (spec).
1637   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1638                                            spec_word_size : chunk_word_size) * sizeof(MetaWord);
1639 
1640   // Do we have enough space to create the requested chunk plus
1641   // any padding chunks needed?
1642   MetaWord* const next_aligned =
1643     static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1644   if (!is_available((next_aligned - top()) + chunk_word_size)) {
1645     return NULL;
1646   }
1647 
1648   // Before allocating the requested chunk, allocate padding chunks if necessary.
  // We only need to do this for small or medium chunks: specialized chunks are the
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
  // (implicitly, also aligned to smallest chunk size).
1652   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
1653     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
1654         (is_class() ? "class space " : "metaspace"),
1655         top(), next_aligned);
1656     allocate_padding_chunks_until_top_is_at(next_aligned);
1657     // Now, top should be aligned correctly.
1658     assert_is_aligned(top(), required_chunk_alignment);
1659   }
1660 
1661   // Now, top should be aligned correctly.
1662   assert_is_aligned(top(), required_chunk_alignment);
1663 
1664   // Bottom of the new chunk
1665   MetaWord* chunk_limit = top();
1666   assert(chunk_limit != NULL, "Not safe to call this method");
1667 
1668   // The virtual spaces are always expanded by the
1669   // commit granularity to enforce the following condition.
1670   // Without this the is_available check will not work correctly.
1671   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1672       "The committed memory doesn't match the expanded memory.");
1673 
1674   if (!is_available(chunk_word_size)) {
1675     LogTarget(Debug, gc, metaspace, freelist) lt;
1676     if (lt.is_enabled()) {
1677       LogStream ls(lt);
1678       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1679       // Dump some information about the virtual space that is nearly full
1680       print_on(&ls);
1681     }
1682     return NULL;
1683   }
1684 
1685   // Take the space  (bump top on the current virtual space).
1686   inc_top(chunk_word_size);
1687 
1688   // Initialize the chunk
1689   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1690   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1691   assert(result == (Metachunk*)chunk_limit, "Sanity");
1692   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1693   do_update_in_use_info_for_chunk(result, true);
1694 
1695   inc_container_count();
1696 
1697   if (VerifyMetaspace) {
1698     DEBUG_ONLY(chunk_manager->locked_verify());
1699     DEBUG_ONLY(this->verify());
1700   }
1701 
1702   DEBUG_ONLY(do_verify_chunk(result));
1703 
1704   result->inc_use_count();
1705 
1706   return result;
1707 }
1708 
1709 
1710 // Expand the virtual space (commit more of the reserved space)
1711 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1712   size_t min_bytes = min_words * BytesPerWord;
1713   size_t preferred_bytes = preferred_words * BytesPerWord;
1714 
1715   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1716 
1717   if (uncommitted < min_bytes) {
1718     return false;
1719   }
1720 
1721   size_t commit = MIN2(preferred_bytes, uncommitted);
1722   bool result = virtual_space()->expand_by(commit, false);
1723 
1724   if (result) {
1725     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1726               (is_class() ? "class" : "non-class"), commit);
1727   } else {
1728     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1729               (is_class() ? "class" : "non-class"), commit);
1730   }
1731 
1732   assert(result, "Failed to commit memory");
1733 
1734   return result;
1735 }
1736 
1737 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1738   assert_lock_strong(SpaceManager::expand_lock());
1739   Metachunk* result = take_from_committed(chunk_word_size);
1740   return result;
1741 }
1742 
1743 bool VirtualSpaceNode::initialize() {
1744 
1745   if (!_rs.is_reserved()) {
1746     return false;
1747   }
1748 
  // These are necessary restrictions to make sure that the virtual space always
1750   // grows in steps of Metaspace::commit_alignment(). If both base and size are
1751   // aligned only the middle alignment of the VirtualSpace is used.
1752   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1753   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1754 
1755   // ReservedSpaces marked as special will have the entire memory
1756   // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
1758   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1759 
1760   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1761                                             Metaspace::commit_alignment());
1762   if (result) {
1763     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1764         "Checking that the pre-committed memory was registered by the VirtualSpace");
1765 
1766     set_top((MetaWord*)virtual_space()->low());
1767     set_reserved(MemRegion((HeapWord*)_rs.base(),
1768                  (HeapWord*)(_rs.base() + _rs.size())));
1769 
1770     assert(reserved()->start() == (HeapWord*) _rs.base(),
1771            "Reserved start was not set properly " PTR_FORMAT
1772            " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1773     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1774            "Reserved size was not set properly " SIZE_FORMAT
1775            " != " SIZE_FORMAT, reserved()->word_size(),
1776            _rs.size() / BytesPerWord);
1777   }
1778 
1779   // Initialize Occupancy Map.
1780   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1781   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1782 
1783   return result;
1784 }
1785 
1786 void VirtualSpaceNode::print_on(outputStream* st) const {
1787   size_t used = used_words_in_vs();
1788   size_t capacity = capacity_words_in_vs();
1789   VirtualSpace* vs = virtual_space();
1790   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1791            "[" PTR_FORMAT ", " PTR_FORMAT ", "
1792            PTR_FORMAT ", " PTR_FORMAT ")",
1793            p2i(vs), capacity / K,
1794            capacity == 0 ? 0 : used * 100 / capacity,
1795            p2i(bottom()), p2i(top()), p2i(end()),
1796            p2i(vs->high_boundary()));
1797 }
1798 
1799 #ifdef ASSERT
1800 void VirtualSpaceNode::mangle() {
1801   size_t word_size = capacity_words_in_vs();
1802   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1803 }
1804 #endif // ASSERT
1805 
1806 // VirtualSpaceList methods
1807 // Space allocated from the VirtualSpace
1808 
1809 VirtualSpaceList::~VirtualSpaceList() {
1810   VirtualSpaceListIterator iter(virtual_space_list());
1811   while (iter.repeat()) {
1812     VirtualSpaceNode* vsl = iter.get_next();
1813     delete vsl;
1814   }
1815 }
1816 
1817 void VirtualSpaceList::inc_reserved_words(size_t v) {
1818   assert_lock_strong(SpaceManager::expand_lock());
1819   _reserved_words = _reserved_words + v;
1820 }
1821 void VirtualSpaceList::dec_reserved_words(size_t v) {
1822   assert_lock_strong(SpaceManager::expand_lock());
1823   _reserved_words = _reserved_words - v;
1824 }
1825 
1826 #define assert_committed_below_limit()                        \
1827   assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
1828          "Too much committed memory. Committed: " SIZE_FORMAT \
1829          " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
1830           MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
1831 
1832 void VirtualSpaceList::inc_committed_words(size_t v) {
1833   assert_lock_strong(SpaceManager::expand_lock());
1834   _committed_words = _committed_words + v;
1835 
1836   assert_committed_below_limit();
1837 }
1838 void VirtualSpaceList::dec_committed_words(size_t v) {
1839   assert_lock_strong(SpaceManager::expand_lock());
1840   _committed_words = _committed_words - v;
1841 
1842   assert_committed_below_limit();
1843 }
1844 
1845 void VirtualSpaceList::inc_virtual_space_count() {
1846   assert_lock_strong(SpaceManager::expand_lock());
1847   _virtual_space_count++;
1848 }
1849 void VirtualSpaceList::dec_virtual_space_count() {
1850   assert_lock_strong(SpaceManager::expand_lock());
1851   _virtual_space_count--;
1852 }
1853 
1854 void ChunkManager::remove_chunk(Metachunk* chunk) {
1855   size_t word_size = chunk->word_size();
1856   ChunkIndex index = list_index(word_size);
1857   if (index != HumongousIndex) {
1858     free_chunks(index)->remove_chunk(chunk);
1859   } else {
1860     humongous_dictionary()->remove_chunk(chunk);
1861   }
1862 
1863   // Chunk has been removed from the chunks free list, update counters.
1864   account_for_removed_chunk(chunk);
1865 }
1866 
1867 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1868   assert_lock_strong(SpaceManager::expand_lock());
1869   assert(chunk != NULL, "invalid chunk pointer");
1870   // Check for valid merge combinations.
1871   assert((chunk->get_chunk_type() == SpecializedIndex &&
1872           (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1873          (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1874         "Invalid chunk merge combination.");
1875 
1876   const size_t target_chunk_word_size =
1877     get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1878 
1879   // [ prospective merge region )
1880   MetaWord* const p_merge_region_start =
1881     (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1882   MetaWord* const p_merge_region_end =
1883     p_merge_region_start + target_chunk_word_size;
1884 
1885   // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1886   VirtualSpaceNode* const vsn = chunk->container();
1887   OccupancyMap* const ocmap = vsn->occupancy_map();
1888 
1889   // The prospective chunk merge range must be completely contained by the
1890   // committed range of the virtual space node.
1891   if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
1892     return false;
1893   }
1894 
1895   // Only attempt to merge this range if at its start a chunk starts and at its end
1896   // a chunk ends. If a chunk (can only be humongous) straddles either start or end
1897   // of that range, we cannot merge.
1898   if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
1899     return false;
1900   }
1901   if (p_merge_region_end < vsn->top() &&
1902       !ocmap->chunk_starts_at_address(p_merge_region_end)) {
1903     return false;
1904   }
1905 
1906   // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
1907   if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
1908     return false;
1909   }
1910 
1911   // Success! Remove all chunks in this region...
1912   log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
1913     (is_class() ? "class space" : "metaspace"),
1914     p_merge_region_start, p_merge_region_end);
1915 
1916   const int num_chunks_removed =
1917     remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
1918 
1919   // ... and create a single new bigger chunk.
1920   Metachunk* const p_new_chunk =
1921       ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1922   assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
1923   p_new_chunk->set_origin(origin_merge);
1924 
1925   log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1926     (is_class() ? "class space" : "metaspace"),
1927     p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1928 
1929   // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1930   ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
1931   ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
1932 
  // Mark chunk as free. Note: it is not necessary to update the in-use
  // information in the occupancy map, because the old chunks were also
  // free, so nothing should have changed.
1936   p_new_chunk->set_is_tagged_free(true);
1937 
1938   // Add new chunk to its freelist.
1939   ChunkList* const list = free_chunks(target_chunk_type);
1940   list->return_chunk_at_head(p_new_chunk);
1941 
  // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
1943   // should not have changed, because the size of the space should be the same)
1944   _free_chunks_count -= num_chunks_removed;
1945   _free_chunks_count ++;
1946 
1947   // VirtualSpaceNode::container_count does not have to be modified:
1948   // it means "number of active (non-free) chunks", so merging free chunks
1949   // should not affect that count.
1950 
1951   // At the end of a chunk merge, run verification tests.
1952   if (VerifyMetaspace) {
1953     DEBUG_ONLY(this->locked_verify());
1954     DEBUG_ONLY(vsn->verify());
1955   }
1956 
1957   return true;
1958 }
1959 
1960 // Remove all chunks in the given area - the chunks are supposed to be free -
1961 // from their corresponding freelists. Mark them as invalid.
1962 // - This does not correct the occupancy map.
1963 // - This does not adjust the counters in ChunkManager.
// - This does not adjust the container count in the containing VirtualSpaceNode.
1965 // Returns number of chunks removed.
1966 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1967   assert(p != NULL && word_size > 0, "Invalid range.");
1968   const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1969   assert_is_aligned(word_size, smallest_chunk_size);
1970 
1971   Metachunk* const start = (Metachunk*) p;
1972   const Metachunk* const end = (Metachunk*)(p + word_size);
1973   Metachunk* cur = start;
1974   int num_removed = 0;
1975   while (cur < end) {
1976     Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1977     DEBUG_ONLY(do_verify_chunk(cur));
1978     assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1979     assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1980     log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
1981       (is_class() ? "class space" : "metaspace"),
1982       cur, cur->word_size() * sizeof(MetaWord));
1983     cur->remove_sentinel();
1984     // Note: cannot call ChunkManager::remove_chunk, because that
1985     // modifies the counters in ChunkManager, which we do not want. So
1986     // we call remove_chunk on the freelist directly (see also the
1987     // splitting function which does the same).
1988     ChunkList* const list = free_chunks(list_index(cur->word_size()));
1989     list->remove_chunk(cur);
1990     num_removed ++;
1991     cur = next;
1992   }
1993   return num_removed;
1994 }
1995 
1996 // Walk the list of VirtualSpaceNodes and delete
1997 // nodes with a 0 container_count.  Remove Metachunks in
1998 // the node from their respective freelists.
1999 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
2000   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
2001   assert_lock_strong(SpaceManager::expand_lock());
2002   // Don't use a VirtualSpaceListIterator because this
2003   // list is being changed and a straightforward use of an iterator is not safe.
2004   VirtualSpaceNode* purged_vsl = NULL;
2005   VirtualSpaceNode* prev_vsl = virtual_space_list();
2006   VirtualSpaceNode* next_vsl = prev_vsl;
2007   while (next_vsl != NULL) {
2008     VirtualSpaceNode* vsl = next_vsl;
2009     DEBUG_ONLY(vsl->verify_container_count();)
2010     next_vsl = vsl->next();
2011     // Don't free the current virtual space since it will likely
2012     // be needed soon.
2013     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
2014       log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
2015                                          ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
2016       // Unlink it from the list
2017       if (prev_vsl == vsl) {
2018         // This is the case of the current node being the first node.
2019         assert(vsl == virtual_space_list(), "Expected to be the first node");
2020         set_virtual_space_list(vsl->next());
2021       } else {
2022         prev_vsl->set_next(vsl->next());
2023       }
2024 
2025       vsl->purge(chunk_manager);
2026       dec_reserved_words(vsl->reserved_words());
2027       dec_committed_words(vsl->committed_words());
2028       dec_virtual_space_count();
2029       purged_vsl = vsl;
2030       delete vsl;
2031     } else {
2032       prev_vsl = vsl;
2033     }
2034   }
2035 #ifdef ASSERT
2036   if (purged_vsl != NULL) {
2037     // List should be stable enough to use an iterator here.
2038     VirtualSpaceListIterator iter(virtual_space_list());
2039     while (iter.repeat()) {
2040       VirtualSpaceNode* vsl = iter.get_next();
2041       assert(vsl != purged_vsl, "Purge of vsl failed");
2042     }
2043   }
2044 #endif
2045 }
2046 
2047 
2048 // This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except
// at unloading time during a safepoint.
2051 bool VirtualSpaceList::contains(const void* ptr) {
2052   // List should be stable enough to use an iterator here because removing virtual
2053   // space nodes is only allowed at a safepoint.
2054   VirtualSpaceListIterator iter(virtual_space_list());
2055   while (iter.repeat()) {
2056     VirtualSpaceNode* vsn = iter.get_next();
2057     if (vsn->contains(ptr)) {
2058       return true;
2059     }
2060   }
2061   return false;
2062 }
2063 
2064 void VirtualSpaceList::retire_current_virtual_space() {
2065   assert_lock_strong(SpaceManager::expand_lock());
2066 
2067   VirtualSpaceNode* vsn = current_virtual_space();
2068 
2069   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2070                                   Metaspace::chunk_manager_metadata();
2071 
2072   vsn->retire(cm);
2073 }
2074 
2075 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2076   DEBUG_ONLY(verify_container_count();)
2077   assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2078   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2079     ChunkIndex index = (ChunkIndex)i;
2080     size_t chunk_size = chunk_manager->size_by_index(index);
2081 
2082     while (free_words_in_vs() >= chunk_size) {
2083       Metachunk* chunk = get_chunk_vs(chunk_size);
      // The chunk will be allocated aligned, so the allocation may require
      // additional padding chunks. That may cause the above allocation to
      // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
      // size should be a multiple of the smallest chunk size, we
      // should always be able to fill the VirtualSpace completely.
2090       if (chunk == NULL) {
2091         break;
2092       }
2093       chunk_manager->return_single_chunk(index, chunk);
2094     }
2095     DEBUG_ONLY(verify_container_count();)
2096   }
2097   assert(free_words_in_vs() == 0, "should be empty now");
2098 }
2099 
2100 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2101                                    _is_class(false),
2102                                    _virtual_space_list(NULL),
2103                                    _current_virtual_space(NULL),
2104                                    _reserved_words(0),
2105                                    _committed_words(0),
2106                                    _virtual_space_count(0) {
2107   MutexLockerEx cl(SpaceManager::expand_lock(),
2108                    Mutex::_no_safepoint_check_flag);
2109   create_new_virtual_space(word_size);
2110 }
2111 
2112 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2113                                    _is_class(true),
2114                                    _virtual_space_list(NULL),
2115                                    _current_virtual_space(NULL),
2116                                    _reserved_words(0),
2117                                    _committed_words(0),
2118                                    _virtual_space_count(0) {
2119   MutexLockerEx cl(SpaceManager::expand_lock(),
2120                    Mutex::_no_safepoint_check_flag);
2121   VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2122   bool succeeded = class_entry->initialize();
2123   if (succeeded) {
2124     link_vs(class_entry);
2125   }
2126 }
2127 
2128 size_t VirtualSpaceList::free_bytes() {
2129   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2130 }
2131 
2132 // Allocate another meta virtual space and add it to the list.
2133 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2134   assert_lock_strong(SpaceManager::expand_lock());
2135 
2136   if (is_class()) {
2137     assert(false, "We currently don't support more than one VirtualSpace for"
2138                   " the compressed class space. The initialization of the"
2139                   " CCS uses another code path and should not hit this path.");
2140     return false;
2141   }
2142 
2143   if (vs_word_size == 0) {
2144     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2145     return false;
2146   }
2147 
2148   // Reserve the space
2149   size_t vs_byte_size = vs_word_size * BytesPerWord;
2150   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2151 
2152   // Allocate the meta virtual space and initialize it.
2153   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2154   if (!new_entry->initialize()) {
2155     delete new_entry;
2156     return false;
2157   } else {
2158     assert(new_entry->reserved_words() == vs_word_size,
2159         "Reserved memory size differs from requested memory size");
    // Ensure lock-free iteration sees a fully initialized node.
2161     OrderAccess::storestore();
2162     link_vs(new_entry);
2163     return true;
2164   }
2165 }
2166 
2167 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2168   if (virtual_space_list() == NULL) {
2169       set_virtual_space_list(new_entry);
2170   } else {
2171     current_virtual_space()->set_next(new_entry);
2172   }
2173   set_current_virtual_space(new_entry);
2174   inc_reserved_words(new_entry->reserved_words());
2175   inc_committed_words(new_entry->committed_words());
2176   inc_virtual_space_count();
2177 #ifdef ASSERT
2178   new_entry->mangle();
2179 #endif
2180   LogTarget(Trace, gc, metaspace) lt;
2181   if (lt.is_enabled()) {
2182     LogStream ls(lt);
2183     VirtualSpaceNode* vsl = current_virtual_space();
2184     ResourceMark rm;
2185     vsl->print_on(&ls);
2186   }
2187 }
2188 
2189 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
2190                                       size_t min_words,
2191                                       size_t preferred_words) {
2192   size_t before = node->committed_words();
2193 
2194   bool result = node->expand_by(min_words, preferred_words);
2195 
2196   size_t after = node->committed_words();
2197 
2198   // after and before can be the same if the memory was pre-committed.
2199   assert(after >= before, "Inconsistency");
2200   inc_committed_words(after - before);
2201 
2202   return result;
2203 }
2204 
2205 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2206   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
2207   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2208   assert(min_words <= preferred_words, "Invalid arguments");
2209 
2210   const char* const class_or_not = (is_class() ? "class" : "non-class");
2211 
2212   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2213     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2214               class_or_not);
2215     return  false;
2216   }
2217 
2218   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2219   if (allowed_expansion_words < min_words) {
2220     log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2221               class_or_not);
2222     return false;
2223   }
2224 
2225   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2226 
  // Commit more memory from the current virtual space.
2228   bool vs_expanded = expand_node_by(current_virtual_space(),
2229                                     min_words,
2230                                     max_expansion_words);
2231   if (vs_expanded) {
2232      log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2233                class_or_not);
2234      return true;
2235   }
2236   log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2237             class_or_not);
2238   retire_current_virtual_space();
2239 
2240   // Get another virtual space.
2241   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2242   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2243 
2244   if (create_new_virtual_space(grow_vs_words)) {
2245     if (current_virtual_space()->is_pre_committed()) {
2246       // The memory was pre-committed, so we are done here.
2247       assert(min_words <= current_virtual_space()->committed_words(),
2248           "The new VirtualSpace was pre-committed, so it"
2249           "should be large enough to fit the alloc request.");
2250       return true;
2251     }
2252 
2253     return expand_node_by(current_virtual_space(),
2254                           min_words,
2255                           max_expansion_words);
2256   }
2257 
2258   return false;
2259 }
2260 
2261 // Given a chunk, calculate the largest possible padding space which
2262 // could be required when allocating it.
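// Illustration: since top() is always at least aligned to the smallest chunk
// size, the worst case for a non-humongous chunk of N words is a gap of
// N - smallest_chunk_size words of padding in front of it. Humongous chunks
// are only ever aligned to the smallest chunk size, so they never need any.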
2263 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2264   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2265   if (chunk_type != HumongousIndex) {
2266     // Normal, non-humongous chunks are allocated at chunk size
2267     // boundaries, so the largest padding space required would be that
2268     // minus the smallest chunk size.
2269     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2270     return chunk_word_size - smallest_chunk_size;
2271   } else {
2272     // Humongous chunks are allocated at smallest-chunksize
2273     // boundaries, so there is no padding required.
2274     return 0;
2275   }
2276 }
2277 
2278 
2279 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2280 
2281   // Allocate a chunk out of the current virtual space.
2282   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2283 
2284   if (next != NULL) {
2285     return next;
2286   }
2287 
2288   // The expand amount is currently only determined by the requested sizes
2289   // and not how much committed memory is left in the current virtual space.
2290 
  // We must have enough space for the requested size and any
  // additional required padding chunks.
2293   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2294 
2295   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2296   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2297   if (min_word_size >= preferred_word_size) {
2298     // Can happen when humongous chunks are allocated.
2299     preferred_word_size = min_word_size;
2300   }
2301 
2302   bool expanded = expand_by(min_word_size, preferred_word_size);
2303   if (expanded) {
2304     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2305     assert(next != NULL, "The allocation was expected to succeed after the expansion");
2306   }
2307 
  return next;
2309 }
2310 
2311 void VirtualSpaceList::print_on(outputStream* st) const {
2312   VirtualSpaceListIterator iter(virtual_space_list());
2313   while (iter.repeat()) {
2314     VirtualSpaceNode* node = iter.get_next();
2315     node->print_on(st);
2316   }
2317 }
2318 
2319 void VirtualSpaceList::print_map(outputStream* st) const {
2320   VirtualSpaceNode* list = virtual_space_list();
2321   VirtualSpaceListIterator iter(list);
2322   unsigned i = 0;
2323   while (iter.repeat()) {
2324     st->print_cr("Node %u:", i);
2325     VirtualSpaceNode* node = iter.get_next();
2326     node->print_map(st, this->is_class());
2327     i ++;
2328   }
2329 }
2330 
2331 // MetaspaceGC methods
2332 
2333 // VM_CollectForMetadataAllocation is the vm operation used to GC.
2334 // Within the VM operation after the GC the attempt to allocate the metadata
2335 // should succeed.  If the GC did not free enough space for the metaspace
2336 // allocation, the HWM is increased so that another virtualspace will be
2337 // allocated for the metadata.  With perm gen the increase in the perm
2338 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
2339 // metaspace policy uses those as the small and large steps for the HWM.
2340 //
2341 // After the GC the compute_new_size() for MetaspaceGC is called to
2342 // resize the capacity of the metaspaces.  The current implementation
2343 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
2344 // to resize the Java heap by some GC's.  New flags can be implemented
2345 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
2346 // free space is desirable in the metaspace capacity to decide how much
2347 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
2348 // free space is desirable in the metaspace capacity before decreasing
2349 // the HWM.
2350 
2351 // Calculate the amount to increase the high water mark (HWM).
2352 // Increase by a minimum amount (MinMetaspaceExpansion) so that
2353 // another expansion is not requested too soon.  If that is not
2354 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
2355 // If that is still not enough, expand by the size of the allocation
2356 // plus some.
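// Illustrative example (using hypothetical values of 256K for
// MinMetaspaceExpansion and 4M for MaxMetaspaceExpansion): a 100K allocation
// grows the HWM by 256K, a 1M allocation by 4M, and a 16M allocation by
// roughly 16M plus 256K, everything rounded to the commit alignment.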
2357 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
2358   size_t min_delta = MinMetaspaceExpansion;
2359   size_t max_delta = MaxMetaspaceExpansion;
2360   size_t delta = align_up(bytes, Metaspace::commit_alignment());
2361 
2362   if (delta <= min_delta) {
2363     delta = min_delta;
2364   } else if (delta <= max_delta) {
2365     // Don't want to hit the high water mark on the next
2366     // allocation so make the delta greater than just enough
2367     // for this allocation.
2368     delta = max_delta;
2369   } else {
2370     // This allocation is large but the next ones are probably not
2371     // so increase by the minimum.
2372     delta = delta + min_delta;
2373   }
2374 
2375   assert_is_aligned(delta, Metaspace::commit_alignment());
2376 
2377   return delta;
2378 }
2379 
2380 size_t MetaspaceGC::capacity_until_GC() {
2381   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
2382   assert(value >= MetaspaceSize, "Not initialized properly?");
2383   return value;
2384 }
2385 
2386 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
2387   assert_is_aligned(v, Metaspace::commit_alignment());
2388 
2389   intptr_t capacity_until_GC = _capacity_until_GC;
2390   intptr_t new_value = capacity_until_GC + v;
2391 
2392   if (new_value < capacity_until_GC) {
2393     // The addition wrapped around, set new_value to aligned max value.
2394     new_value = align_down(max_uintx, Metaspace::commit_alignment());
2395   }
2396 
2397   intptr_t expected = _capacity_until_GC;
2398   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
2399 
2400   if (expected != actual) {
2401     return false;
2402   }
2403 
2404   if (new_cap_until_GC != NULL) {
2405     *new_cap_until_GC = new_value;
2406   }
2407   if (old_cap_until_GC != NULL) {
2408     *old_cap_until_GC = capacity_until_GC;
2409   }
2410   return true;
2411 }
2412 
2413 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
2414   assert_is_aligned(v, Metaspace::commit_alignment());
2415 
2416   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2417 }
2418 
2419 void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
2422   _capacity_until_GC = MaxMetaspaceSize;
2423 }
2424 
2425 void MetaspaceGC::post_initialize() {
2426   // Reset the high-water mark once the VM initialization is done.
2427   _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
2428 }
2429 
2430 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2431   // Check if the compressed class space is full.
2432   if (is_class && Metaspace::using_class_space()) {
2433     size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
2434     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2435       log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2436                 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2437       return false;
2438     }
2439   }
2440 
2441   // Check if the user has imposed a limit on the metaspace memory.
2442   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2443   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2444     log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2445               (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2446     return false;
2447   }
2448 
2449   return true;
2450 }
2451 
2452 size_t MetaspaceGC::allowed_expansion() {
2453   size_t committed_bytes = MetaspaceUtils::committed_bytes();
2454   size_t capacity_until_gc = capacity_until_GC();
2455 
2456   assert(capacity_until_gc >= committed_bytes,
2457          "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2458          capacity_until_gc, committed_bytes);
2459 
2460   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
2461   size_t left_until_GC = capacity_until_gc - committed_bytes;
2462   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2463   log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2464             " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2465             left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2466 
2467   return left_to_commit / BytesPerWord;
2468 }
2469 
2470 void MetaspaceGC::compute_new_size() {
2471   assert(_shrink_factor <= 100, "invalid shrink factor");
2472   uint current_shrink_factor = _shrink_factor;
2473   _shrink_factor = 0;
2474 
2475   // Using committed_bytes() for used_after_gc is an overestimation, since the
2476   // chunk free lists are included in committed_bytes() and the memory in an
2477   // un-fragmented chunk free list is available for future allocations.
2478   // However, if the chunk free lists becomes fragmented, then the memory may
2479   // not be available for future allocations and the memory is therefore "in use".
2480   // Including the chunk free lists in the definition of "in use" is therefore
2481   // necessary. Not including the chunk free lists can cause capacity_until_GC to
2482   // shrink below committed_bytes() and this has caused serious bugs in the past.
2483   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
2484   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2485 
2486   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
2487   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
2488 
2489   const double min_tmp = used_after_gc / maximum_used_percentage;
2490   size_t minimum_desired_capacity =
2491     (size_t)MIN2(min_tmp, double(max_uintx));
2492   // Don't shrink less than the initial generation size
2493   minimum_desired_capacity = MAX2(minimum_desired_capacity,
2494                                   MetaspaceSize);
2495 
2496   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
2497   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
2498                            minimum_free_percentage, maximum_used_percentage);
2499   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
2500 
2501 
2502   size_t shrink_bytes = 0;
2503   if (capacity_until_GC < minimum_desired_capacity) {
2504     // If we have less capacity below the metaspace HWM, then
2505     // increment the HWM.
2506     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
2507     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
2508     // Don't expand unless it's significant
2509     if (expand_bytes >= MinMetaspaceExpansion) {
2510       size_t new_capacity_until_GC = 0;
2511       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
2512       assert(succeeded, "Should always succesfully increment HWM when at safepoint");
2513 
2514       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2515                                                new_capacity_until_GC,
2516                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
2517       log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
2518                                minimum_desired_capacity / (double) K,
2519                                expand_bytes / (double) K,
2520                                MinMetaspaceExpansion / (double) K,
2521                                new_capacity_until_GC / (double) K);
2522     }
2523     return;
2524   }
2525 
2526   // No expansion, now see if we want to shrink
2527   // We would never want to shrink more than this
2528   assert(capacity_until_GC >= minimum_desired_capacity,
2529          SIZE_FORMAT " >= " SIZE_FORMAT,
2530          capacity_until_GC, minimum_desired_capacity);
2531   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
2532 
2533   // Should shrinking be considered?
2534   if (MaxMetaspaceFreeRatio < 100) {
2535     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
2536     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
2537     const double max_tmp = used_after_gc / minimum_used_percentage;
2538     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
2539     maximum_desired_capacity = MAX2(maximum_desired_capacity,
2540                                     MetaspaceSize);
2541     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
2542                              maximum_free_percentage, minimum_used_percentage);
2543     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
2544                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
2545 
2546     assert(minimum_desired_capacity <= maximum_desired_capacity,
2547            "sanity check");
2548 
2549     if (capacity_until_GC > maximum_desired_capacity) {
2550       // Capacity too large, compute shrinking size
2551       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
2553       // System.gc(), because some programs do that between "phases" and then
2554       // we'd just have to grow the heap up again for the next phase.  So we
2555       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
2556       // on the third call, and 100% by the fourth call.  But if we recompute
2557       // size without shrinking, it goes back to 0%.
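      // Illustrative example: with 40M of excess capacity, the first shrinking
      // GC releases nothing (factor 0) and arms the factor to 10; a second
      // consecutive shrinking GC releases 4M and arms the factor to 40, and so
      // on, until a recomputation that does not shrink resets the factor to 0.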
2558       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
2559 
2560       shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
2561 
2562       assert(shrink_bytes <= max_shrink_bytes,
2563              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
2564              shrink_bytes, max_shrink_bytes);
2565       if (current_shrink_factor == 0) {
2566         _shrink_factor = 10;
2567       } else {
2568         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
2569       }
2570       log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
2571                                MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
2572       log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
2573                                shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
2574     }
2575   }
2576 
2577   // Don't shrink unless it's significant
2578   if (shrink_bytes >= MinMetaspaceExpansion &&
2579       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
2580     size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
2581     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
2582                                              new_capacity_until_GC,
2583                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
2584   }
2585 }
2586 
2587 // Metadebug methods
2588 
2589 void Metadebug::init_allocation_fail_alot_count() {
2590   if (MetadataAllocationFailALot) {
2591     _allocation_fail_alot_count =
2592       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
2593   }
2594 }
2595 
2596 #ifdef ASSERT
2597 bool Metadebug::test_metadata_failure() {
2598   if (MetadataAllocationFailALot &&
2599       Threads::is_vm_complete()) {
2600     if (_allocation_fail_alot_count > 0) {
2601       _allocation_fail_alot_count--;
2602     } else {
2603       log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
2604       init_allocation_fail_alot_count();
2605       return true;
2606     }
2607   }
2608   return false;
2609 }
2610 #endif
2611 
2612 // ChunkManager methods
2613 size_t ChunkManager::free_chunks_total_words() {
2614   return _free_chunks_total;
2615 }
2616 
2617 size_t ChunkManager::free_chunks_total_bytes() {
2618   return free_chunks_total_words() * BytesPerWord;
2619 }
2620 
2621 // Update internal accounting after a chunk was added
2622 void ChunkManager::account_for_added_chunk(const Metachunk* c) {
2623   assert_lock_strong(SpaceManager::expand_lock());
2624   _free_chunks_count ++;
2625   _free_chunks_total += c->word_size();
2626 }
2627 
2628 // Update internal accounting after a chunk was removed
2629 void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
2630   assert_lock_strong(SpaceManager::expand_lock());
2631   assert(_free_chunks_count >= 1,
2632     "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
2633   assert(_free_chunks_total >= c->word_size(),
2634     "ChunkManager::_free_chunks_total: about to go negative "
2635      "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
2636   _free_chunks_count --;
2637   _free_chunks_total -= c->word_size();
2638 }
2639 
2640 size_t ChunkManager::free_chunks_count() {
2641 #ifdef ASSERT
2642   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
2643     MutexLockerEx cl(SpaceManager::expand_lock(),
2644                      Mutex::_no_safepoint_check_flag);
2645     // This lock is only needed in debug because the verification
2646     // of the _free_chunks_totals walks the list of free chunks
2647     slow_locked_verify_free_chunks_count();
2648   }
2649 #endif
2650   return _free_chunks_count;
2651 }
2652 
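     // Maps an exact chunk word size to its freelist index; any size larger than the
     // medium chunk size is treated as humongous.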
2653 ChunkIndex ChunkManager::list_index(size_t size) {
2654   if (size_by_index(SpecializedIndex) == size) {
2655     return SpecializedIndex;
2656   }
2657   if (size_by_index(SmallIndex) == size) {
2658     return SmallIndex;
2659   }
2660   const size_t med_size = size_by_index(MediumIndex);
2661   if (med_size == size) {
2662     return MediumIndex;
2663   }
2664 
2665   assert(size > med_size, "Not a humongous chunk");
2666   return HumongousIndex;
2667 }
2668 
2669 size_t ChunkManager::size_by_index(ChunkIndex index) const {
2670   index_bounds_check(index);
2671   assert(index != HumongousIndex, "Do not call for humongous chunks.");
2672   return _free_chunks[index].size();
2673 }
2674 
2675 void ChunkManager::locked_verify_free_chunks_total() {
2676   assert_lock_strong(SpaceManager::expand_lock());
2677   assert(sum_free_chunks() == _free_chunks_total,
2678          "_free_chunks_total " SIZE_FORMAT " is not the"
2679          " same as sum " SIZE_FORMAT, _free_chunks_total,
2680          sum_free_chunks());
2681 }
2682 
2683 void ChunkManager::verify_free_chunks_total() {
2684   MutexLockerEx cl(SpaceManager::expand_lock(),
2685                      Mutex::_no_safepoint_check_flag);
2686   locked_verify_free_chunks_total();
2687 }
2688 
2689 void ChunkManager::locked_verify_free_chunks_count() {
2690   assert_lock_strong(SpaceManager::expand_lock());
2691   assert(sum_free_chunks_count() == _free_chunks_count,
2692          "_free_chunks_count " SIZE_FORMAT " is not the"
2693          " same as sum " SIZE_FORMAT, _free_chunks_count,
2694          sum_free_chunks_count());
2695 }
2696 
2697 void ChunkManager::verify_free_chunks_count() {
2698 #ifdef ASSERT
2699   MutexLockerEx cl(SpaceManager::expand_lock(),
2700                      Mutex::_no_safepoint_check_flag);
2701   locked_verify_free_chunks_count();
2702 #endif
2703 }
2704 
2705 void ChunkManager::verify() {
2706   MutexLockerEx cl(SpaceManager::expand_lock(),
2707                      Mutex::_no_safepoint_check_flag);
2708   locked_verify();
2709 }
2710 
2711 void ChunkManager::locked_verify() {
2712   locked_verify_free_chunks_count();
2713   locked_verify_free_chunks_total();
2714   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2715     ChunkList* list = free_chunks(i);
2716     if (list != NULL) {
2717       Metachunk* chunk = list->head();
2718       while (chunk) {
2719         DEBUG_ONLY(do_verify_chunk(chunk);)
2720         assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2721         chunk = chunk->next();
2722       }
2723     }
2724   }
2725 }
2726 
2727 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2728   assert_lock_strong(SpaceManager::expand_lock());
2729   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2730                 _free_chunks_total, _free_chunks_count);
2731 }
2732 
2733 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2734   assert_lock_strong(SpaceManager::expand_lock());
2735   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
2736                 sum_free_chunks(), sum_free_chunks_count());
2737 }
2738 
2739 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2740   assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2741          "Bad index: %d", (int)index);
2742 
2743   return &_free_chunks[index];
2744 }
2745 
2746 // The following methods, which sum over the free chunk lists, are used by
2747 // printing methods that run in product builds.
2748 size_t ChunkManager::sum_free_chunks() {
2749   assert_lock_strong(SpaceManager::expand_lock());
2750   size_t result = 0;
2751   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2752     ChunkList* list = free_chunks(i);
2753 
2754     if (list == NULL) {
2755       continue;
2756     }
2757 
2758     result = result + list->count() * list->size();
2759   }
2760   result = result + humongous_dictionary()->total_size();
2761   return result;
2762 }
2763 
2764 size_t ChunkManager::sum_free_chunks_count() {
2765   assert_lock_strong(SpaceManager::expand_lock());
2766   size_t count = 0;
2767   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2768     ChunkList* list = free_chunks(i);
2769     if (list == NULL) {
2770       continue;
2771     }
2772     count = count + list->count();
2773   }
2774   count = count + humongous_dictionary()->total_free_blocks();
2775   return count;
2776 }
2777 
2778 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2779   ChunkIndex index = list_index(word_size);
2780   assert(index < HumongousIndex, "No humongous list");
2781   return free_chunks(index);
2782 }
2783 
2784 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
2785 // split the larger chunk into n smaller chunks, at least one of which is the
2786 // target chunk of the target chunk size. The smaller chunks, including the target
2787 // chunk, are returned to the freelist. A pointer to the target chunk is returned.
2788 // Note that the caller is expected to remove the target chunk from the freelist right away.
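     // As a sketch of the resulting layout (assuming, purely for illustration, a medium
     // chunk being split to obtain a specialized chunk): the region starts with one chunk
     // of the target size, followed by the largest chunks whose alignment requirements are
     // met at each subsequent address, keeping the split region "as coalesced as possible".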
2789 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
2790   assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
2791 
2792   const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
2793   const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
2794 
2795   MetaWord* const region_start = (MetaWord*)larger_chunk;
2796   const size_t region_word_len = larger_chunk->word_size();
2797   MetaWord* const region_end = region_start + region_word_len;
2798   VirtualSpaceNode* const vsn = larger_chunk->container();
2799   OccupancyMap* const ocmap = vsn->occupancy_map();
2800 
2801   // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
2802   // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
2803   // at an address suitable to place the smaller target chunk.
2804   assert_is_aligned(region_start, target_chunk_word_size);
2805 
2806   // Remove old chunk.
2807   free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2808   larger_chunk->remove_sentinel();
2809 
2810   // Prevent access to the old chunk from here on.
2811   larger_chunk = NULL;
2812   // ... and wipe it.
2813   DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2814 
2815   // In its place create first the target chunk...
2816   MetaWord* p = region_start;
2817   Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
2818   assert(target_chunk == (Metachunk*)p, "Sanity");
2819   target_chunk->set_origin(origin_split);
2820 
2821   // Note: we do not need to mark its start in the occupancy map
2822   // because it coincides with the old chunk start.
2823 
2824   // Mark chunk as free and return to the freelist.
2825   do_update_in_use_info_for_chunk(target_chunk, false);
2826   free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
2827 
2828   // This chunk should now be valid and can be verified.
2829   DEBUG_ONLY(do_verify_chunk(target_chunk));
2830 
2831   // In the remaining space create the remainder chunks.
2832   p += target_chunk->word_size();
2833   assert(p < region_end, "Sanity");
2834 
2835   while (p < region_end) {
2836 
2837     // Find the largest chunk size which fits the alignment requirements at address p.
2838     ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2839     size_t this_chunk_word_size = 0;
2840     for(;;) {
2841       this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2842       if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2843         break;
2844       } else {
2845         this_chunk_index = prev_chunk_index(this_chunk_index);
2846         assert(this_chunk_index >= target_chunk_index, "Sanity");
2847       }
2848     }
2849 
2850     assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
2851     assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2852     assert(p + this_chunk_word_size <= region_end, "Sanity");
2853 
2854     // Create splitting chunk.
2855     Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2856     assert(this_chunk == (Metachunk*)p, "Sanity");
2857     this_chunk->set_origin(origin_split);
2858     ocmap->set_chunk_starts_at_address(p, true);
2859     do_update_in_use_info_for_chunk(this_chunk, false);
2860 
2861     // This chunk should be valid and can be verified.
2862     DEBUG_ONLY(do_verify_chunk(this_chunk));
2863 
2864     // Return this chunk to freelist and correct counter.
2865     free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2866     _free_chunks_count ++;
2867 
2868     log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2869       SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2870       p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2871       p2i(region_start), p2i(region_end));
2872 
2873     p += this_chunk_word_size;
2874 
2875   }
2876 
2877   return target_chunk;
2878 }
2879 
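     // Takes a chunk of the given word size off the free lists. If no chunk of that exact
     // size is free, a larger non-humongous chunk may be split to produce one; humongous
     // requests are served from the dictionary. Returns NULL if nothing suitable is available.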
2880 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2881   assert_lock_strong(SpaceManager::expand_lock());
2882 
2883   slow_locked_verify();
2884 
2885   Metachunk* chunk = NULL;
2886   bool we_did_split_a_chunk = false;
2887 
2888   if (list_index(word_size) != HumongousIndex) {
2889 
2890     ChunkList* free_list = find_free_chunks_list(word_size);
2891     assert(free_list != NULL, "Sanity check");
2892 
2893     chunk = free_list->head();
2894 
2895     if (chunk == NULL) {
2896       // No free chunk of this exact size; try to split a larger free chunk into smaller chunks.
2897       // This is the counterpart of the coalescing-upon-chunk-return.
2898 
2899       ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2900 
2901       // Is there a larger chunk we could split?
2902       Metachunk* larger_chunk = NULL;
2903       ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2904       while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2905         larger_chunk = free_chunks(larger_chunk_index)->head();
2906         if (larger_chunk == NULL) {
2907           larger_chunk_index = next_chunk_index(larger_chunk_index);
2908         }
2909       }
2910 
2911       if (larger_chunk != NULL) {
2912         assert(larger_chunk->word_size() > word_size, "Sanity");
2913         assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2914 
2915         // We found a larger chunk. Let's split it up:
2916         // - remove old chunk
2917         // - in its place, create new smaller chunks, with at least one chunk
2918         //   being of target size, the others sized as large as possible. This
2919         //   is to make sure the resulting chunks are "as coalesced as possible"
2920         //   (similar to VirtualSpaceNode::retire()).
2921         // Note: during this operation both ChunkManager and VirtualSpaceNode
2922         //  are temporarily invalid, so be careful with asserts.
2923 
2924         log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
2925            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2926           (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2927           chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2928 
2929         chunk = split_chunk(word_size, larger_chunk);
2930 
2931         // This should have worked.
2932         assert(chunk != NULL, "Sanity");
2933         assert(chunk->word_size() == word_size, "Sanity");
2934         assert(chunk->is_tagged_free(), "Sanity");
2935 
2936         we_did_split_a_chunk = true;
2937 
2938       }
2939     }
2940 
2941     if (chunk == NULL) {
2942       return NULL;
2943     }
2944 
2945     // Remove the chunk as the head of the list.
2946     free_list->remove_chunk(chunk);
2947 
2948     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
2949                                        p2i(free_list), free_list->count());
2950 
2951   } else {
2952     chunk = humongous_dictionary()->get_chunk(word_size);
2953 
2954     if (chunk == NULL) {
2955       return NULL;
2956     }
2957 
2958     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2959                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
2960   }
2961 
2962   // Chunk has been removed from the chunk manager; update counters.
2963   account_for_removed_chunk(chunk);
2964   do_update_in_use_info_for_chunk(chunk, true);
2965   chunk->container()->inc_container_count();
2966   chunk->inc_use_count();
2967 
2968   // Remove it from the links to this freelist
2969   chunk->set_next(NULL);
2970   chunk->set_prev(NULL);
2971 
2972   // Run some verifications (some more if we did a chunk split)
2973 #ifdef ASSERT
2974   if (VerifyMetaspace) {
2975     locked_verify();
2976     VirtualSpaceNode* const vsn = chunk->container();
2977     vsn->verify();
2978     if (we_did_split_a_chunk) {
2979       vsn->verify_free_chunks_are_ideally_merged();
2980     }
2981   }
2982 #endif
2983 
2984   return chunk;
2985 }
2986 
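     // Allocates a chunk of the given word size via free_chunks_get() and, if debug
     // logging for (gc, metaspace, freelist) is enabled, prints the state of the
     // free chunk lists. Returns NULL if no chunk could be obtained.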
2987 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2988   assert_lock_strong(SpaceManager::expand_lock());
2989   slow_locked_verify();
2990 
2991   // Take from the beginning of the list
2992   Metachunk* chunk = free_chunks_get(word_size);
2993   if (chunk == NULL) {
2994     return NULL;
2995   }
2996 
2997   assert((word_size <= chunk->word_size()) ||
2998          (list_index(chunk->word_size()) == HumongousIndex),
2999          "Non-humongous variable sized chunk");
3000   LogTarget(Debug, gc, metaspace, freelist) lt;
3001   if (lt.is_enabled()) {
3002     size_t list_count;
3003     if (list_index(word_size) < HumongousIndex) {
3004       ChunkList* list = find_free_chunks_list(word_size);
3005       list_count = list->count();
3006     } else {
3007       list_count = humongous_dictionary()->total_count();
3008     }
3009     LogStream ls(lt);
3010     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
3011              p2i(this), p2i(chunk), chunk->word_size(), list_count);
3012     ResourceMark rm;
3013     locked_print_free_chunks(&ls);
3014   }
3015 
3016   return chunk;
3017 }
3018 
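     // Returns a single chunk to this ChunkManager: non-humongous chunks go back to their
     // freelist, humongous chunks to the dictionary. Counters are updated and, for small
     // and specialized chunks, a merge with neighboring free chunks is attempted.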
3019 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
3020   assert_lock_strong(SpaceManager::expand_lock());
3021   assert(chunk != NULL, "Expected chunk.");
3022   DEBUG_ONLY(do_verify_chunk(chunk);)
3023   assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
3024   assert(chunk->container() != NULL, "Container should have been set.");
3025   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
3026   index_bounds_check(index);
3027 
3028   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
3029   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
3030   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
3031   DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
3032 
3033   if (index != HumongousIndex) {
3034     // Return non-humongous chunk to freelist.
3035     ChunkList* list = free_chunks(index);
3036     assert(list->size() == chunk->word_size(), "Wrong chunk type.");
3037     list->return_chunk_at_head(chunk);
3038     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
3039         chunk_size_name(index), p2i(chunk));
3040   } else {
3041     // Return humongous chunk to dictionary.
3042     assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
3043     assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
3044            "Humongous chunk has wrong alignment.");
3045     _humongous_dictionary.return_chunk(chunk);
3046     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
3047         chunk_size_name(index), p2i(chunk), chunk->word_size());
3048   }
3049   chunk->container()->dec_container_count();
3050   do_update_in_use_info_for_chunk(chunk, false);
3051 
3052   // Chunk has been added; update counters.
3053   account_for_added_chunk(chunk);
3054 
3055   // Attempt to coalesce the returned chunk with its neighboring chunks:
3056   // if this chunk is small or specialized, attempt to coalesce to a medium chunk.
3057   if (index == SmallIndex || index == SpecializedIndex) {
3058     if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
3059       // This did not work. But if this chunk is specialized, we may still be able to form a small chunk.
3060       if (index == SpecializedIndex) {
3061         if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
3062           // give up.
3063         }
3064       }
3065     }
3066   }
3067 
3068 }
3069 
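     // Returns an entire NULL-terminated list of chunks of the given type to this
     // ChunkManager, one chunk at a time via return_single_chunk().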
3070 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3071   index_bounds_check(index);
3072   if (chunks == NULL) {
3073     return;
3074   }
3075   LogTarget(Trace, gc, metaspace, freelist) log;
3076   if (log.is_enabled()) { // tracing
3077     log.print("returning list of %s chunks...", chunk_size_name(index));
3078   }
3079   unsigned num_chunks_returned = 0;
3080   size_t size_chunks_returned = 0;
3081   Metachunk* cur = chunks;
3082   while (cur != NULL) {
3083     // Capture the next link before it is changed
3084     // by the call to return_chunk_at_head();
3085     Metachunk* next = cur->next();
3086     if (log.is_enabled()) { // tracing
3087       num_chunks_returned ++;
3088       size_chunks_returned += cur->word_size();
3089     }
3090     return_single_chunk(index, cur);
3091     cur = next;
3092   }
3093   if (log.is_enabled()) { // tracing
3094     log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".",
3095         num_chunks_returned, chunk_size_name(index), size_chunks_returned);
3096     if (index != HumongousIndex) {
3097       log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size());
3098     } else {
3099       log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count());
3100     }
3101   }
3102 }
3103 
3104 void ChunkManager::print_on(outputStream* out) const {
3105   _humongous_dictionary.report_statistics(out);
3106 }
3107 
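     // Fills a ChunkManagerStatistics record with per-chunk-type counts and sizes.
     // The caller must hold the expand lock; get_statistics() below is the locking wrapper.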
3108 void ChunkManager::locked_get_statistics(ChunkManagerStatistics* stat) const {
3109   assert_lock_strong(SpaceManager::expand_lock());
3110   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3111     stat->num_by_type[i] = num_free_chunks(i);
3112     stat->single_size_by_type[i] = size_by_index(i);
3113     stat->total_size_by_type[i] = size_free_chunks_in_bytes(i);
3114   }
3115   stat->num_humongous_chunks = num_free_chunks(HumongousIndex);
3116   stat->total_size_humongous_chunks = size_free_chunks_in_bytes(HumongousIndex);
3117 }
3118 
3119 void ChunkManager::get_statistics(ChunkManagerStatistics* stat) const {
3120   MutexLockerEx cl(SpaceManager::expand_lock(),
3121                    Mutex::_no_safepoint_check_flag);
3122   locked_get_statistics(stat);
3123 }
3124 
3125 void ChunkManager::print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale) {
3126   size_t total = 0;
3127   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3128 
3129   const char* unit = scale_unit(scale);
3130   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
3131     out->print("  " SIZE_FORMAT " %s (" SIZE_FORMAT " bytes) chunks, total ",
3132                    stat->num_by_type[i], chunk_size_name(i),
3133                    stat->single_size_by_type[i]);
3134     if (scale == 1) {
3135       out->print_cr(SIZE_FORMAT " bytes", stat->total_size_by_type[i]);
3136     } else {
3137       out->print_cr("%.2f%s", (float)stat->total_size_by_type[i] / scale, unit);
3138     }
3139 
3140     total += stat->total_size_by_type[i];
3141   }
3142 
3143 
3144   total += stat->total_size_humongous_chunks;
3145 
3146   if (scale == 1) {
3147     out->print_cr("  " SIZE_FORMAT " humongous chunks, total " SIZE_FORMAT " bytes",
3148                   stat->num_humongous_chunks, stat->total_size_humongous_chunks);
3149 
3150     out->print_cr("  total size: " SIZE_FORMAT " bytes.", total);
3151   } else {
3152     out->print_cr("  " SIZE_FORMAT " humongous chunks, total %.2f%s",
3153                   stat->num_humongous_chunks,
3154                   (float)stat->total_size_humongous_chunks / scale, unit);
3155 
3156     out->print_cr("  total size: %.2f%s.", (float)total / scale, unit);
3157   }
3158 
3159 }
3160 
3161 void ChunkManager::print_all_chunkmanagers(outputStream* out, size_t scale) {
3162   assert(scale == 1 || scale == K || scale == M || scale == G, "Invalid scale");
3163 
3164   // Note: take the lock only while retrieving the statistics; do the printing
3165   // outside of lock protection.
3166   ChunkManagerStatistics stat;
3167   out->print_cr("Chunkmanager (non-class):");
3168   const ChunkManager* const non_class_cm = Metaspace::chunk_manager_metadata();
3169   if (non_class_cm != NULL) {
3170     non_class_cm->get_statistics(&stat);
3171     ChunkManager::print_statistics(&stat, out, scale);
3172   } else {
3173     out->print_cr("unavailable.");
3174   }
3175   out->print_cr("Chunkmanager (class):");
3176   const ChunkManager* const class_cm = Metaspace::chunk_manager_class();
3177   if (class_cm != NULL) {
3178     class_cm->get_statistics(&stat);
3179     ChunkManager::print_statistics(&stat, out, scale);
3180   } else {
3181     out->print_cr("unavailable.");
3182   }
3183 }
3184 
3185 // SpaceManager methods
3186 
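     // Rounds a requested initial chunk size up to the next fixed chunk size (specialized,
     // small or medium); larger requests are returned unchanged and treated as humongous.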
3187 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
3188   size_t chunk_sizes[] = {
3189       specialized_chunk_size(is_class_space),
3190       small_chunk_size(is_class_space),
3191       medium_chunk_size(is_class_space)
3192   };
3193 
3194   // Adjust up to one of the fixed chunk sizes ...
3195   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
3196     if (requested <= chunk_sizes[i]) {
3197       return chunk_sizes[i];
3198     }
3199   }
3200 
3201   // ... or return the size as a humongous chunk.
3202   return requested;
3203 }
3204 
3205 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
3206   return adjust_initial_chunk_size(requested, is_class());
3207 }
3208 
3209 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
3210   size_t requested;
3211 
3212   if (is_class()) {
3213     switch (type) {
3214     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
3215     case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
3216     case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
3217     default:                                 requested = ClassSmallChunk; break;
3218     }
3219   } else {
3220     switch (type) {
3221     case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
3222     case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
3223     case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
3224     default:                                 requested = SmallChunk; break;
3225     }
3226   }
3227 
3228   // Adjust to one of the fixed chunk sizes (unless humongous)
3229   const size_t adjusted = adjust_initial_chunk_size(requested);
3230 
3231   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
3232          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
3233 
3234   return adjusted;
3235 }
3236 
3237 size_t SpaceManager::sum_free_in_chunks_in_use() const {
3238   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3239   size_t free = 0;
3240   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3241     Metachunk* chunk = chunks_in_use(i);
3242     while (chunk != NULL) {
3243       free += chunk->free_word_size();
3244       chunk = chunk->next();
3245     }
3246   }
3247   return free;
3248 }
3249 
3250 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
3251   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3252   size_t result = 0;
3253   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3254    result += sum_waste_in_chunks_in_use(i);
3255   }
3256 
3257   return result;
3258 }
3259 
3260 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
3261   size_t result = 0;
3262   Metachunk* chunk = chunks_in_use(index);
3263   // Count the free space in all the chunks except the
3264   // current chunk, from which allocations are still being done.
3265   while (chunk != NULL) {
3266     if (chunk != current_chunk()) {
3267       result += chunk->free_word_size();
3268     }
3269     chunk = chunk->next();
3270   }
3271   return result;
3272 }
3273 
3274 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
3275   // For CMS use "allocated_chunks_words()", which does not need the
3276   // Metaspace lock.  For the other collectors, sum over the lists.
3277   // That is, sum_capacity_in_chunks_in_use() is too expensive to use in
3278   // product builds, so allocated_chunks_words() should be used instead,
3279   // while still allowing a check that allocated_chunks_words() returns the
3280   // same value as sum_capacity_in_chunks_in_use(), which is the definitive
3281   // answer.
3283   if (UseConcMarkSweepGC) {
3284     return allocated_chunks_words();
3285   } else {
3286     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3287     size_t sum = 0;
3288     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3289       Metachunk* chunk = chunks_in_use(i);
3290       while (chunk != NULL) {
3291         sum += chunk->word_size();
3292         chunk = chunk->next();
3293       }
3294     }
3295     return sum;
3296   }
3297 }
3298 
3299 size_t SpaceManager::sum_count_in_chunks_in_use() {
3300   size_t count = 0;
3301   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3302     count = count + sum_count_in_chunks_in_use(i);
3303   }
3304 
3305   return count;
3306 }
3307 
3308 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
3309   size_t count = 0;
3310   Metachunk* chunk = chunks_in_use(i);
3311   while (chunk != NULL) {
3312     count++;
3313     chunk = chunk->next();
3314   }
3315   return count;
3316 }
3317 
3318 
3319 size_t SpaceManager::sum_used_in_chunks_in_use() const {
3320   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3321   size_t used = 0;
3322   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3323     Metachunk* chunk = chunks_in_use(i);
3324     while (chunk != NULL) {
3325       used += chunk->used_word_size();
3326       chunk = chunk->next();
3327     }
3328   }
3329   return used;
3330 }
3331 
3332 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
3333 
3334   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3335     Metachunk* chunk = chunks_in_use(i);
3336     st->print("SpaceManager: %s " PTR_FORMAT,
3337                  chunk_size_name(i), p2i(chunk));
3338     if (chunk != NULL) {
3339       st->print_cr(" free " SIZE_FORMAT,
3340                    chunk->free_word_size());
3341     } else {
3342       st->cr();
3343     }
3344   }
3345 
3346   chunk_manager()->locked_print_free_chunks(st);
3347   chunk_manager()->locked_print_sum_free_chunks(st);
3348 }
3349 
3350 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3351 
3352   // Decide between a small chunk and a medium chunk.  Up to
3353   // _small_chunk_limit small chunks can be allocated.
3354   // After that a medium chunk is preferred.
3355   size_t chunk_word_size;
3356 
3357   // Special case for anonymous metadata space.
3358   // Anonymous metadata space is usually small, with the majority within the
3359   // 1K - 2K range and rarely around 4K (64-bit JVM).
3360   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
3361   // allocations at SpecializedChunk size up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
3362   // reduces space waste from 60+% to around 30%.
3363   if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3364       _mdtype == Metaspace::NonClassType &&
3365       sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3366       word_size + Metachunk::overhead() <= SpecializedChunk) {
3367     return SpecializedChunk;
3368   }
3369 
3370   if (chunks_in_use(MediumIndex) == NULL &&
3371       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3372     chunk_word_size = (size_t) small_chunk_size();
3373     if (word_size + Metachunk::overhead() > small_chunk_size()) {
3374       chunk_word_size = medium_chunk_size();
3375     }
3376   } else {
3377     chunk_word_size = medium_chunk_size();
3378   }
3379 
3380   // Might still need a humongous chunk.  Enforce that
3381   // humongous allocation sizes are aligned up to
3382   // the smallest chunk size.
3383   size_t if_humongous_sized_chunk =
3384     align_up(word_size + Metachunk::overhead(),
3385                   smallest_chunk_size());
3386   chunk_word_size =
3387     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
3388 
3389   assert(!SpaceManager::is_humongous(word_size) ||
3390          chunk_word_size == if_humongous_sized_chunk,
3391          "Size calculation is wrong, word_size " SIZE_FORMAT
3392          " chunk_word_size " SIZE_FORMAT,
3393          word_size, chunk_word_size);
3394   Log(gc, metaspace, alloc) log;
3395   if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
3396     log.debug("Metadata humongous allocation:");
3397     log.debug("  word_size " PTR_FORMAT, word_size);
3398     log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
3399     log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
3400   }
3401   return chunk_word_size;
3402 }
3403 
3404 void SpaceManager::track_metaspace_memory_usage() {
3405   if (is_init_completed()) {
3406     if (is_class()) {
3407       MemoryService::track_compressed_class_memory_usage();
3408     }
3409     MemoryService::track_metaspace_memory_usage();
3410   }
3411 }
3412 
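     // Called when the current chunk could not satisfy the request: obtains another chunk
     // (sized by calc_chunk_size()), adds it to the in-use lists and allocates from it.
     // Returns NULL if no new chunk could be obtained.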
3413 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
3414   assert(vs_list()->current_virtual_space() != NULL,
3415          "Should have been set");
3416   assert(current_chunk() == NULL ||
3417          current_chunk()->allocate(word_size) == NULL,
3418          "Don't need to expand");
3419   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3420 
3421   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3422     size_t words_left = 0;
3423     size_t words_used = 0;
3424     if (current_chunk() != NULL) {
3425       words_left = current_chunk()->free_word_size();
3426       words_used = current_chunk()->used_word_size();
3427     }
3428     log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
3429                                        word_size, words_used, words_left);
3430   }
3431 
3432   // Get another chunk
3433   size_t chunk_word_size = calc_chunk_size(word_size);
3434   Metachunk* next = get_new_chunk(chunk_word_size);
3435 
3436   MetaWord* mem = NULL;
3437 
3438   // If a chunk was available, add it to the in-use chunk list
3439   // and do an allocation from it.
3440   if (next != NULL) {
3441     // Add to this manager's list of chunks in use.
3442     add_chunk(next, false);
3443     mem = next->allocate(word_size);
3444   }
3445 
3446   // Track metaspace memory usage statistic.
3447   track_metaspace_memory_usage();
3448 
3449   return mem;
3450 }
3451 
3452 void SpaceManager::print_on(outputStream* st) const {
3453 
3454   for (ChunkIndex i = ZeroIndex;
3455        i < NumberOfInUseLists ;
3456        i = next_chunk_index(i) ) {
3457     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
3458                  p2i(chunks_in_use(i)),
3459                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
3460   }
3461   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
3462                " Humongous " SIZE_FORMAT,
3463                sum_waste_in_chunks_in_use(SmallIndex),
3464                sum_waste_in_chunks_in_use(MediumIndex),
3465                sum_waste_in_chunks_in_use(HumongousIndex));
3466   // block free lists
3467   if (block_freelists() != NULL) {
3468     st->print_cr("total in block free lists " SIZE_FORMAT,
3469       block_freelists()->total_size());
3470   }
3471 }
3472 
3473 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
3474                            Metaspace::MetaspaceType space_type,
3475                            Mutex* lock) :
3476   _mdtype(mdtype),
3477   _space_type(space_type),
3478   _allocated_blocks_words(0),
3479   _allocated_chunks_words(0),
3480   _allocated_chunks_count(0),
3481   _block_freelists(NULL),
3482   _lock(lock)
3483 {
3484   initialize();
3485 }
3486 
3487 void SpaceManager::inc_size_metrics(size_t words) {
3488   assert_lock_strong(SpaceManager::expand_lock());
3489   // Running totals of allocated Metachunk words and allocated Metachunk count
3490   // for this SpaceManager
3491   _allocated_chunks_words = _allocated_chunks_words + words;
3492   _allocated_chunks_count++;
3493   // Global total of capacity in allocated Metachunks
3494   MetaspaceUtils::inc_capacity(mdtype(), words);
3495   // Global total of allocated Metablocks.
3496   // used_words_slow() includes the overhead in each
3497   // Metachunk so include it in the used when the
3498   // Metachunk is first added (so only added once per
3499   // Metachunk).
3500   MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
3501 }
3502 
3503 void SpaceManager::inc_used_metrics(size_t words) {
3504   // Add to the per SpaceManager total
3505   Atomic::add(words, &_allocated_blocks_words);
3506   // Add to the global total
3507   MetaspaceUtils::inc_used(mdtype(), words);
3508 }
3509 
3510 void SpaceManager::dec_total_from_size_metrics() {
3511   MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
3512   MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
3513   // Also deduct the overhead per Metachunk
3514   MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
3515 }
3516 
3517 void SpaceManager::initialize() {
3518   Metadebug::init_allocation_fail_alot_count();
3519   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3520     _chunks_in_use[i] = NULL;
3521   }
3522   _current_chunk = NULL;
3523   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3524 }
3525 
3526 SpaceManager::~SpaceManager() {
3527   // The following call takes this->_lock, which must not be done while holding expand_lock()
3528   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3529          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3530          " allocated_chunks_words() " SIZE_FORMAT,
3531          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3532 
3533   MutexLockerEx fcl(SpaceManager::expand_lock(),
3534                     Mutex::_no_safepoint_check_flag);
3535 
3536   assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3537          "sum_count_in_chunks_in_use() " SIZE_FORMAT
3538          " allocated_chunks_count() " SIZE_FORMAT,
3539          sum_count_in_chunks_in_use(), allocated_chunks_count());
3540 
3541   chunk_manager()->slow_locked_verify();
3542 
3543   dec_total_from_size_metrics();
3544 
3545   Log(gc, metaspace, freelist) log;
3546   if (log.is_trace()) {
3547     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3548     ResourceMark rm;
3549     LogStream ls(log.trace());
3550     locked_print_chunks_in_use_on(&ls);
3551     if (block_freelists() != NULL) {
3552       block_freelists()->print_on(&ls);
3553     }
3554   }
3555 
3556   // Add all the chunks in use by this space manager
3557   // to the global list of free chunks.
3558 
3559   // Follow each list of chunks-in-use and add them to the
3560   // free lists.  Each list is NULL terminated.
3561 
3562   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
3563     Metachunk* chunks = chunks_in_use(i);
3564     chunk_manager()->return_chunk_list(i, chunks);
3565     set_chunks_in_use(i, NULL);
3566   }
3567 
3568   chunk_manager()->slow_locked_verify();
3569 
3570   if (_block_freelists != NULL) {
3571     delete _block_freelists;
3572   }
3573 }
3574 
3575 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
3576   assert_lock_strong(_lock);
3577   // Allocations and deallocations are in raw_word_size
3578   size_t raw_word_size = get_allocation_word_size(word_size);
3579   // Lazily create a block_freelist
3580   if (block_freelists() == NULL) {
3581     _block_freelists = new BlockFreelist();
3582   }
3583   block_freelists()->return_block(p, raw_word_size);
3584 }
3585 
3586 // Adds a chunk to the list of chunks in use.
3587 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
3588 
3589   assert(new_chunk != NULL, "Should not be NULL");
3590   assert(new_chunk->next() == NULL, "Should not be on a list");
3591 
3592   new_chunk->reset_empty();
3593 
3594   // Find the correct list and set the current
3595   // chunk for that list.
3596   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
3597 
3598   if (index != HumongousIndex) {
3599     retire_current_chunk();
3600     set_current_chunk(new_chunk);
3601     new_chunk->set_next(chunks_in_use(index));
3602     set_chunks_in_use(index, new_chunk);
3603   } else {
3604     // For null class loader data and DumpSharedSpaces, the first chunk isn't
3605     // small, so small will be null.  Link this first chunk as the current
3606     // chunk.
3607     if (make_current) {
3608       // Set as the current chunk but otherwise treat as a humongous chunk.
3609       set_current_chunk(new_chunk);
3610     }
3611     // Link at head.  The _current_chunk only points to a humongous chunk for
3612     // the null class loader metaspace (class and data virtual space managers);
3613     // since new humongous chunks are linked at the head, it will not point to
3614     // the tail of the humongous chunks list.
3615     new_chunk->set_next(chunks_in_use(HumongousIndex));
3616     set_chunks_in_use(HumongousIndex, new_chunk);
3617 
3618     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
3619   }
3620 
3621   // Add to the running sum of capacity
3622   inc_size_metrics(new_chunk->word_size());
3623 
3624   assert(new_chunk->is_empty(), "Not ready for reuse");
3625   Log(gc, metaspace, freelist) log;
3626   if (log.is_trace()) {
3627     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
3628     ResourceMark rm;
3629     LogStream ls(log.trace());
3630     new_chunk->print_on(&ls);
3631     chunk_manager()->locked_print_free_chunks(&ls);
3632   }
3633 }
3634 
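     // Retires the current chunk: if its remaining free space is large enough to be tracked
     // by the block freelist, it is handed over as a deallocated block so that later
     // allocations can reuse it.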
3635 void SpaceManager::retire_current_chunk() {
3636   if (current_chunk() != NULL) {
3637     size_t remaining_words = current_chunk()->free_word_size();
3638     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
3639       MetaWord* ptr = current_chunk()->allocate(remaining_words);
3640       deallocate(ptr, remaining_words);
3641       inc_used_metrics(remaining_words);
3642     }
3643   }
3644 }
3645 
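     // Gets a new chunk of the requested size, first from the ChunkManager freelist and,
     // failing that, by carving one out of the current virtual space (growing the virtual
     // space list if necessary).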
3646 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3647   // Get a chunk from the chunk freelist
3648   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3649 
3650   if (next == NULL) {
3651     next = vs_list()->get_new_chunk(chunk_word_size,
3652                                     medium_chunk_bunch());
3653   }
3654 
3655   Log(gc, metaspace, alloc) log;
3656   if (log.is_debug() && next != NULL &&
3657       SpaceManager::is_humongous(next->word_size())) {
3658     log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
3659   }
3660 
3661   return next;
3662 }
3663 
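     // Entry point for metadata allocation from this SpaceManager: first tries the block
     // freelist of previously deallocated blocks (once it has grown past
     // allocation_from_dictionary_limit), then falls back to allocate_work().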
3664 MetaWord* SpaceManager::allocate(size_t word_size) {
3665   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3666   size_t raw_word_size = get_allocation_word_size(word_size);
3667   BlockFreelist* fl =  block_freelists();
3668   MetaWord* p = NULL;
3669   // Allocation from the dictionary is expensive in the sense that
3670   // the dictionary has to be searched for a size.  Don't allocate
3671   // from the dictionary until it starts to get fat.  Is this
3672   // a reasonable policy?  Maybe a skinny dictionary is fast enough
3673   // for allocations.  Do some profiling.  JJJ
3674   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3675     p = fl->get_block(raw_word_size);
3676   }
3677   if (p == NULL) {
3678     p = allocate_work(raw_word_size);
3679   }
3680 
3681   return p;
3682 }
3683 
3684 // Returns the address of space allocated for "word_size".
3685 // This method does not know about blocks (Metablocks).
3686 MetaWord* SpaceManager::allocate_work(size_t word_size) {
3687   assert_lock_strong(_lock);
3688 #ifdef ASSERT
3689   if (Metadebug::test_metadata_failure()) {
3690     return NULL;
3691   }
3692 #endif
3693   // Is there space in the current chunk?
3694   MetaWord* result = NULL;
3695 
3696   if (current_chunk() != NULL) {
3697     result = current_chunk()->allocate(word_size);
3698   }
3699 
3700   if (result == NULL) {
3701     result = grow_and_allocate(word_size);
3702   }
3703 
3704   if (result != NULL) {
3705     inc_used_metrics(word_size);
3706     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3707            "Head of the list is being allocated");
3708   }
3709 
3710   return result;
3711 }
3712 
3713 void SpaceManager::verify() {
3714   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3715     Metachunk* curr = chunks_in_use(i);
3716     while (curr != NULL) {
3717       DEBUG_ONLY(do_verify_chunk(curr);)
3718       assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3719       curr = curr->next();
3720     }
3721   }
3722 }
3723 
3724 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3725   assert(is_humongous(chunk->word_size()) ||
3726          chunk->word_size() == medium_chunk_size() ||
3727          chunk->word_size() == small_chunk_size() ||
3728          chunk->word_size() == specialized_chunk_size(),
3729          "Chunk size is wrong");
3730   return;
3731 }
3732 
3733 #ifdef ASSERT
3734 void SpaceManager::verify_allocated_blocks_words() {
3735   // Verification is only guaranteed at a safepoint.
3736   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3737     "Verification can fail if the application is running");
3738   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
3739          "allocation total is not consistent " SIZE_FORMAT
3740          " vs " SIZE_FORMAT,
3741          allocated_blocks_words(), sum_used_in_chunks_in_use());
3742 }
3743 
3744 #endif
3745 
3746 void SpaceManager::dump(outputStream* const out) const {
3747   size_t curr_total = 0;
3748   size_t waste = 0;
3749   uint i = 0;
3750   size_t used = 0;
3751   size_t capacity = 0;
3752 
3753   // Add up statistics for all chunks in this SpaceManager.
3754   for (ChunkIndex index = ZeroIndex;
3755        index < NumberOfInUseLists;
3756        index = next_chunk_index(index)) {
3757     for (Metachunk* curr = chunks_in_use(index);
3758          curr != NULL;
3759          curr = curr->next()) {
3760       out->print("%d) ", i++);
3761       curr->print_on(out);
3762       curr_total += curr->word_size();
3763       used += curr->used_word_size();
3764       capacity += curr->word_size();
3765       waste += curr->free_word_size() + curr->overhead();
3766     }
3767   }
3768 
3769   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
3770     if (block_freelists() != NULL) block_freelists()->print_on(out);
3771   }
3772 
3773   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
3774   // Free space isn't wasted.
3775   waste -= free;
3776 
3777   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
3778                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
3779                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
3780 }
3781 
3782 // MetaspaceUtils
3783 
3784 
3785 size_t MetaspaceUtils::_capacity_words[] = {0, 0};
3786 volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
3787 
3788 size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
3789   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3790   return list == NULL ? 0 : list->free_bytes();
3791 }
3792 
3793 size_t MetaspaceUtils::free_bytes() {
3794   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
3795 }
3796 
3797 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
3798   assert_lock_strong(SpaceManager::expand_lock());
3799   assert(words <= capacity_words(mdtype),
3800          "About to decrement below 0: words " SIZE_FORMAT
3801          " is greater than _capacity_words[%u] " SIZE_FORMAT,
3802          words, mdtype, capacity_words(mdtype));
3803   _capacity_words[mdtype] -= words;
3804 }
3805 
3806 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
3807   assert_lock_strong(SpaceManager::expand_lock());
3808   // Needs to be atomic
3809   _capacity_words[mdtype] += words;
3810 }
3811 
3812 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
3813   assert(words <= used_words(mdtype),
3814          "About to decrement below 0: words " SIZE_FORMAT
3815          " is greater than _used_words[%u] " SIZE_FORMAT,
3816          words, mdtype, used_words(mdtype));
3817   // For CMS, deallocation of the Metaspaces occurs during the
3818   // sweep, which is a concurrent phase.  Protection by the expand_lock()
3819   // is not enough since allocation is on a per-Metaspace basis
3820   // and protected by the Metaspace lock.
3821   Atomic::sub(words, &_used_words[mdtype]);
3822 }
3823 
3824 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
3825   // _used_words tracks allocations for
3826   // each piece of metadata.  Those allocations are
3827   // generally done concurrently by different application
3828   // threads so must be done atomically.
3829   Atomic::add(words, &_used_words[mdtype]);
3830 }
3831 
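     // The *_slow methods below walk all ClassLoaderMetaspaces via the ClassLoaderDataGraph
     // and sum per-metaspace values; they are intended for verification and debug printing,
     // not for fast-path accounting.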
3832 size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
3833   size_t used = 0;
3834   ClassLoaderDataGraphMetaspaceIterator iter;
3835   while (iter.repeat()) {
3836     ClassLoaderMetaspace* msp = iter.get_next();
3837     // Sum allocated_blocks_words for each metaspace
3838     if (msp != NULL) {
3839       used += msp->used_words_slow(mdtype);
3840     }
3841   }
3842   return used * BytesPerWord;
3843 }
3844 
3845 size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
3846   size_t free = 0;
3847   ClassLoaderDataGraphMetaspaceIterator iter;
3848   while (iter.repeat()) {
3849     ClassLoaderMetaspace* msp = iter.get_next();
3850     if (msp != NULL) {
3851       free += msp->free_words_slow(mdtype);
3852     }
3853   }
3854   return free * BytesPerWord;
3855 }
3856 
3857 size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
3858   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
3859     return 0;
3860   }
3861   // Don't count the space in the freelists.  That space will be
3862   // added to the capacity calculation as needed.
3863   size_t capacity = 0;
3864   ClassLoaderDataGraphMetaspaceIterator iter;
3865   while (iter.repeat()) {
3866     ClassLoaderMetaspace* msp = iter.get_next();
3867     if (msp != NULL) {
3868       capacity += msp->capacity_words_slow(mdtype);
3869     }
3870   }
3871   return capacity * BytesPerWord;
3872 }
3873 
3874 size_t MetaspaceUtils::capacity_bytes_slow() {
3875 #ifdef PRODUCT
3876   // Use capacity_bytes() in PRODUCT instead of this function.
3877   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
3878 #endif
3879   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
3880   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
3881   assert(capacity_bytes() == class_capacity + non_class_capacity,
3882          "bad accounting: capacity_bytes() " SIZE_FORMAT
3883          " class_capacity + non_class_capacity " SIZE_FORMAT
3884          " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
3885          capacity_bytes(), class_capacity + non_class_capacity,
3886          class_capacity, non_class_capacity);
3887 
3888   return class_capacity + non_class_capacity;
3889 }
3890 
3891 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
3892   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3893   return list == NULL ? 0 : list->reserved_bytes();
3894 }
3895 
3896 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
3897   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
3898   return list == NULL ? 0 : list->committed_bytes();
3899 }
3900 
3901 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
3902 
3903 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
3904   ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
3905   if (chunk_manager == NULL) {
3906     return 0;
3907   }
3908   chunk_manager->slow_verify();
3909   return chunk_manager->free_chunks_total_words();
3910 }
3911 
3912 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
3913   return free_chunks_total_words(mdtype) * BytesPerWord;
3914 }
3915 
3916 size_t MetaspaceUtils::free_chunks_total_words() {
3917   return free_chunks_total_words(Metaspace::ClassType) +
3918          free_chunks_total_words(Metaspace::NonClassType);
3919 }
3920 
3921 size_t MetaspaceUtils::free_chunks_total_bytes() {
3922   return free_chunks_total_words() * BytesPerWord;
3923 }
3924 
3925 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
3926   return Metaspace::get_chunk_manager(mdtype) != NULL;
3927 }
3928 
3929 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
3930   if (!has_chunk_free_list(mdtype)) {
3931     return MetaspaceChunkFreeListSummary();
3932   }
3933 
3934   const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
3935   return cm->chunk_free_list_summary();
3936 }
3937 
3938 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
3939   log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
3940                           prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
3941 }
3942 
3943 void MetaspaceUtils::print_on(outputStream* out) {
3944   Metaspace::MetadataType nct = Metaspace::NonClassType;
3945 
3946   out->print_cr(" Metaspace       "
3947                 "used "      SIZE_FORMAT "K, "
3948                 "capacity "  SIZE_FORMAT "K, "
3949                 "committed " SIZE_FORMAT "K, "
3950                 "reserved "  SIZE_FORMAT "K",
3951                 used_bytes()/K,
3952                 capacity_bytes()/K,
3953                 committed_bytes()/K,
3954                 reserved_bytes()/K);
3955 
3956   if (Metaspace::using_class_space()) {
3957     Metaspace::MetadataType ct = Metaspace::ClassType;
3958     out->print_cr("  class space    "
3959                   "used "      SIZE_FORMAT "K, "
3960                   "capacity "  SIZE_FORMAT "K, "
3961                   "committed " SIZE_FORMAT "K, "
3962                   "reserved "  SIZE_FORMAT "K",
3963                   used_bytes(ct)/K,
3964                   capacity_bytes(ct)/K,
3965                   committed_bytes(ct)/K,
3966                   reserved_bytes(ct)/K);
3967   }
3968 }
3969 
3970 // Print information for class space and data space separately.
3971 // This is almost the same as above.
3972 void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
3973   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
3974   size_t capacity_bytes = capacity_bytes_slow(mdtype);
3975   size_t used_bytes = used_bytes_slow(mdtype);
3976   size_t free_bytes = free_bytes_slow(mdtype);
3977   size_t used_and_free = used_bytes + free_bytes +
3978                            free_chunks_capacity_bytes;
3979   out->print_cr("  Chunk accounting: (used in chunks " SIZE_FORMAT
3980              "K + unused in chunks " SIZE_FORMAT "K  + "
3981              " capacity in free chunks " SIZE_FORMAT "K) = " SIZE_FORMAT
3982              "K  capacity in allocated chunks " SIZE_FORMAT "K",
3983              used_bytes / K,
3984              free_bytes / K,
3985              free_chunks_capacity_bytes / K,
3986              used_and_free / K,
3987              capacity_bytes / K);
3988   // Accounting can only be correct if we got the values during a safepoint
3989   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
3990 }
3991 
3992 // Print total fragmentation for class metaspaces
3993 void MetaspaceUtils::print_class_waste(outputStream* out) {
3994   assert(Metaspace::using_class_space(), "class metaspace not used");
3995   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
3996   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
3997   ClassLoaderDataGraphMetaspaceIterator iter;
3998   while (iter.repeat()) {
3999     ClassLoaderMetaspace* msp = iter.get_next();
4000     if (msp != NULL) {
4001       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4002       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4003       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4004       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
4005       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4006       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
4007       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4008     }
4009   }
4010   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4011                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4012                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4013                 "large count " SIZE_FORMAT,
4014                 cls_specialized_count, cls_specialized_waste,
4015                 cls_small_count, cls_small_waste,
4016                 cls_medium_count, cls_medium_waste, cls_humongous_count);
4017 }
4018 
4019 // Print total fragmentation for data and class metaspaces separately
4020 void MetaspaceUtils::print_waste(outputStream* out) {
4021   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
4022   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
4023 
4024   ClassLoaderDataGraphMetaspaceIterator iter;
4025   while (iter.repeat()) {
4026     ClassLoaderMetaspace* msp = iter.get_next();
4027     if (msp != NULL) {
4028       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
4029       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
4030       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
4031       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
4032       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
4033       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
4034       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
4035     }
4036   }
4037   out->print_cr("Total fragmentation waste (words) doesn't count free space");
4038   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
4039                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
4040                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
4041                         "large count " SIZE_FORMAT,
4042              specialized_count, specialized_waste, small_count,
4043              small_waste, medium_count, medium_waste, humongous_count);
4044   if (Metaspace::using_class_space()) {
4045     print_class_waste(out);
4046   }
4047 }
4048 
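// Small helper that accumulates capacity/used/free/waste counters for
// metaspace statistics; used by the per-class-loader printing closure below.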
4049 class MetadataStats {
4050 private:
4051   size_t _capacity;
4052   size_t _used;
4053   size_t _free;
4054   size_t _waste;
4055 
4056 public:
4057   MetadataStats() : _capacity(0), _used(0), _free(0), _waste(0) { }
4058   MetadataStats(size_t capacity, size_t used, size_t free, size_t waste)
4059   : _capacity(capacity), _used(used), _free(free), _waste(waste) { }
4060 
4061   void add(const MetadataStats& stats) {
4062     _capacity += stats.capacity();
4063     _used += stats.used();
4064     _free += stats.free();
4065     _waste += stats.waste();
4066   }
4067 
4068   size_t capacity() const { return _capacity; }
4069   size_t used() const     { return _used; }
4070   size_t free() const     { return _free; }
4071   size_t waste() const    { return _waste; }
4072 
4073   void print_on(outputStream* out, size_t scale) const;
4074 };
4075 
4076 
4077 void MetadataStats::print_on(outputStream* out, size_t scale) const {
4078   const char* unit = scale_unit(scale);
4079   out->print_cr("capacity=%10.2f%s used=%10.2f%s free=%10.2f%s waste=%10.2f%s",
4080     (float)capacity() / scale, unit,
4081     (float)used() / scale, unit,
4082     (float)free() / scale, unit,
4083     (float)waste() / scale, unit);
4084 }
4085 
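// Closure for walking the ClassLoaderDataGraph: prints per-class-loader
// metaspace statistics and accumulates totals, which are printed as a
// summary when the closure is destroyed.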
4086 class PrintCLDMetaspaceInfoClosure : public CLDClosure {
4087 private:
4088   outputStream*  _out;
4089   size_t         _scale;
4090 
4091   size_t         _total_count;
4092   MetadataStats  _total_metadata;
4093   MetadataStats  _total_class;
4094 
4095   size_t         _total_anon_count;
4096   MetadataStats  _total_anon_metadata;
4097   MetadataStats  _total_anon_class;
4098 
4099 public:
4100   PrintCLDMetaspaceInfoClosure(outputStream* out, size_t scale = K)
4101   : _out(out), _scale(scale), _total_count(0), _total_anon_count(0) { }
4102 
4103   ~PrintCLDMetaspaceInfoClosure() {
4104     print_summary();
4105   }
4106 
4107   void do_cld(ClassLoaderData* cld) {
4108     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
4109 
4110     if (cld->is_unloading()) return;
4111     ClassLoaderMetaspace* msp = cld->metaspace_or_null();
4112     if (msp == NULL) {
4113       return;
4114     }
4115 
4116     bool anonymous = false;
4117     if (cld->is_anonymous()) {
4118       _out->print_cr("ClassLoader: for anonymous class");
4119       anonymous = true;
4120     } else {
4121       ResourceMark rm;
4122       _out->print_cr("ClassLoader: %s", cld->loader_name());
4123     }
4124 
4125     print_metaspace(msp, anonymous);
4126     _out->cr();
4127   }
4128 
4129 private:
4130   void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
4131   void print_summary() const;
4132 };
4133 
4134 void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
4135   assert(msp != NULL, "Sanity");
4136   SpaceManager* vsm = msp->vsm();
4138 
4139   size_t capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4140   size_t used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4141   size_t free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4142   size_t waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4143 
4144   _total_count ++;
4145   MetadataStats metadata_stats(capacity, used, free, waste);
4146   _total_metadata.add(metadata_stats);
4147 
4148   if (anonymous) {
4149     _total_anon_count ++;
4150     _total_anon_metadata.add(metadata_stats);
4151   }
4152 
4153   _out->print("  Metadata   ");
4154   metadata_stats.print_on(_out, _scale);
4155 
4156   if (Metaspace::using_class_space()) {
4157     vsm = msp->class_vsm();
4158 
4159     capacity = vsm->sum_capacity_in_chunks_in_use() * BytesPerWord;
4160     used = vsm->sum_used_in_chunks_in_use() * BytesPerWord;
4161     free = vsm->sum_free_in_chunks_in_use() * BytesPerWord;
4162     waste = vsm->sum_waste_in_chunks_in_use() * BytesPerWord;
4163 
4164     MetadataStats class_stats(capacity, used, free, waste);
4165     _total_class.add(class_stats);
4166 
4167     if (anonymous) {
4168       _total_anon_class.add(class_stats);
4169     }
4170 
4171     _out->print("  Class data ");
4172     class_stats.print_on(_out, _scale);
4173   }
4174 }
4175 
4176 void PrintCLDMetaspaceInfoClosure::print_summary() const {
4178   _out->cr();
4179   _out->print_cr("Summary:");
4180 
4181   MetadataStats total;
4182   total.add(_total_metadata);
4183   total.add(_total_class);
4184 
4185   _out->print("  Total class loaders=" SIZE_FORMAT_W(6) " ", _total_count);
4186   total.print_on(_out, _scale);
4187 
4188   _out->print("                    Metadata ");
4189   _total_metadata.print_on(_out, _scale);
4190 
4191   if (Metaspace::using_class_space()) {
4192     _out->print("                  Class data ");
4193     _total_class.print_on(_out, _scale);
4194   }
4195   _out->cr();
4196 
4197   MetadataStats total_anon;
4198   total_anon.add(_total_anon_metadata);
4199   total_anon.add(_total_anon_class);
4200 
4201   _out->print("For anonymous classes=" SIZE_FORMAT_W(6) " ", _total_anon_count);
4202   total_anon.print_on(_out, _scale);
4203 
4204   _out->print("                    Metadata ");
4205   _total_anon_metadata.print_on(_out, _scale);
4206 
4207   if (Metaspace::using_class_space()) {
4208     _out->print("                  Class data ");
4209     _total_anon_class.print_on(_out, _scale);
4210   }
4211 }
4212 
4213 void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
4214   const char* unit = scale_unit(scale);
4215   out->print_cr("Metaspaces:");
4216   out->print_cr("  Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4217     reserved_bytes(Metaspace::NonClassType) / scale, unit,
4218     committed_bytes(Metaspace::NonClassType) / scale, unit);
4219   if (Metaspace::using_class_space()) {
4220     out->print_cr("  Class    space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
4221     reserved_bytes(Metaspace::ClassType) / scale, unit,
4222     committed_bytes(Metaspace::ClassType) / scale, unit);
4223   }
4224 
4225   out->cr();
4226   ChunkManager::print_all_chunkmanagers(out, scale);
4227 
4228   out->cr();
4229   out->print_cr("Per-classloader metadata:");
4230   out->cr();
4231 
4232   PrintCLDMetaspaceInfoClosure cl(out, scale);
4233   ClassLoaderDataGraph::cld_do(&cl);
4234 }
4235 
4236 
4237 // Dump global metaspace things from the end of ClassLoaderDataGraph
4238 void MetaspaceUtils::dump(outputStream* out) {
4239   out->print_cr("All Metaspace:");
4240   out->print("data space: "); print_on(out, Metaspace::NonClassType);
4241   out->print("class space: "); print_on(out, Metaspace::ClassType);
4242   print_waste(out);
4243 }
4244 
4245 // Prints an ASCII representation of the given space.
4246 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
4247   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4248   const bool for_class = (mdtype == Metaspace::ClassType);
4249   VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
4250   if (vsl != NULL) {
4251     if (for_class) {
4252       if (!Metaspace::using_class_space()) {
4253         out->print_cr("No Class Space.");
4254         return;
4255       }
4256       out->print_raw("---- Metaspace Map (Class Space) ----");
4257     } else {
4258       out->print_raw("---- Metaspace Map (Non-Class Space) ----");
4259     }
4260     // Print legend:
4261     out->cr();
4262     out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
4263     out->cr();
4265     vsl->print_map(out);
4266     out->cr();
4267   }
4268 }
4269 
4270 void MetaspaceUtils::verify_free_chunks() {
4271   Metaspace::chunk_manager_metadata()->verify();
4272   if (Metaspace::using_class_space()) {
4273     Metaspace::chunk_manager_class()->verify();
4274   }
4275 }
4276 
4277 void MetaspaceUtils::verify_capacity() {
4278 #ifdef ASSERT
4279   size_t running_sum_capacity_bytes = capacity_bytes();
4280   // For purposes of the running sum of capacity, verify against capacity
4281   size_t capacity_in_use_bytes = capacity_bytes_slow();
4282   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
4283          "capacity_words() * BytesPerWord " SIZE_FORMAT
4284          " capacity_bytes_slow()" SIZE_FORMAT,
4285          running_sum_capacity_bytes, capacity_in_use_bytes);
4286   for (Metaspace::MetadataType i = Metaspace::ClassType;
4287        i < Metaspace::MetadataTypeCount;
4288        i = (Metaspace::MetadataType)(i + 1)) {
4289     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
4290     assert(capacity_bytes(i) == capacity_in_use_bytes,
4291            "capacity_bytes(%u) " SIZE_FORMAT
4292            " capacity_bytes_slow(%u)" SIZE_FORMAT,
4293            i, capacity_bytes(i), i, capacity_in_use_bytes);
4294   }
4295 #endif
4296 }
4297 
4298 void MetaspaceUtils::verify_used() {
4299 #ifdef ASSERT
4300   size_t running_sum_used_bytes = used_bytes();
4301   // For purposes of the running sum of used, verify against used
4302   size_t used_in_use_bytes = used_bytes_slow();
4303   assert(running_sum_used_bytes == used_in_use_bytes,
4304          "used_bytes() " SIZE_FORMAT
4305          " used_bytes_slow() " SIZE_FORMAT,
4306          running_sum_used_bytes, used_in_use_bytes);
4307   for (Metaspace::MetadataType i = Metaspace::ClassType;
4308        i < Metaspace::MetadataTypeCount;
4309        i = (Metaspace::MetadataType)(i + 1)) {
4310     size_t used_in_use_bytes = used_bytes_slow(i);
4311     assert(used_bytes(i) == used_in_use_bytes,
4312            "used_bytes(%u) " SIZE_FORMAT
4313            " used_bytes_slow(%u)" SIZE_FORMAT,
4314            i, used_bytes(i), i, used_in_use_bytes);
4315   }
4316 #endif
4317 }
4318 
4319 void MetaspaceUtils::verify_metrics() {
4320   verify_capacity();
4321   verify_used();
4322 }
4323 
4324 
4325 // Metaspace methods
4326 
4327 size_t Metaspace::_first_chunk_word_size = 0;
4328 size_t Metaspace::_first_class_chunk_word_size = 0;
4329 
4330 size_t Metaspace::_commit_alignment = 0;
4331 size_t Metaspace::_reserve_alignment = 0;
4332 
4333 VirtualSpaceList* Metaspace::_space_list = NULL;
4334 VirtualSpaceList* Metaspace::_class_space_list = NULL;
4335 
4336 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
4337 ChunkManager* Metaspace::_chunk_manager_class = NULL;
4338 
4339 #define VIRTUALSPACEMULTIPLIER 2
4340 
4341 #ifdef _LP64
4342 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
4343 
4344 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
4345   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
4346   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
4347   // narrow_klass_base is the lower of the metaspace base and the cds base
4348   // (if cds is enabled).  The narrow_klass_shift depends on the distance
4349   // between the lower base and higher address.
4350   address lower_base;
4351   address higher_address;
4352 #if INCLUDE_CDS
4353   if (UseSharedSpaces) {
4354     higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4355                           (address)(metaspace_base + compressed_class_space_size()));
4356     lower_base = MIN2(metaspace_base, cds_base);
4357   } else
4358 #endif
4359   {
4360     higher_address = metaspace_base + compressed_class_space_size();
4361     lower_base = metaspace_base;
4362 
4363     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
4364     // If compressed class space fits in lower 32G, we don't need a base.
4365     if (higher_address <= (address)klass_encoding_max) {
4366       lower_base = 0; // Effectively lower base is zero.
4367     }
4368   }
4369 
4370   Universe::set_narrow_klass_base(lower_base);
4371 
4372   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
4373   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
4374   // how dump time narrow_klass_shift is set. Although, CDS can work
4375   // with zero-shift mode also, to be consistent with AOT it uses
4376   // LogKlassAlignmentInBytes for klass shift so archived java heap objects
4377   // can be used at same time as AOT code.
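  // For example, when CDS is off and the distance between the encoding base
  // and the end of the class space is at most UnscaledClassSpaceMax (4G),
  // a zero shift suffices; otherwise shifting by LogKlassAlignmentInBytes
  // extends the encodable range up to KlassEncodingMetaspaceMax.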
4378   if (!UseSharedSpaces
4379       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
4380     Universe::set_narrow_klass_shift(0);
4381   } else {
4382     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
4383   }
4384   AOTLoader::set_narrow_klass_shift();
4385 }
4386 
4387 #if INCLUDE_CDS
4388 // Return TRUE if the specified metaspace_base and cds_base are close enough
4389 // to work with compressed klass pointers.
4390 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
4391   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
4392   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4393   address lower_base = MIN2((address)metaspace_base, cds_base);
4394   address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
4395                                 (address)(metaspace_base + compressed_class_space_size()));
4396   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
4397 }
4398 #endif
4399 
4400 // Try to allocate the metaspace at the requested addr.
4401 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
4402   assert(!DumpSharedSpaces, "compressed class space is allocated by MetaspaceShared class.");
4403   assert(using_class_space(), "called improperly");
4404   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
4405   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
4406          "Metaspace size is too big");
4407   assert_is_aligned(requested_addr, _reserve_alignment);
4408   assert_is_aligned(cds_base, _reserve_alignment);
4409   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
4410 
4411   // Don't use large pages for the class space.
4412   bool large_pages = false;
4413 
4414 #if !(defined(AARCH64) || defined(AIX))
4415   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
4416                                              _reserve_alignment,
4417                                              large_pages,
4418                                              requested_addr);
4419 #else // AARCH64 || AIX
4420   ReservedSpace metaspace_rs;
4421 
4422   // Our compressed klass pointers may fit nicely into the lower 32
4423   // bits.
4424   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
4425     metaspace_rs = ReservedSpace(compressed_class_space_size(),
4426                                  _reserve_alignment,
4427                                  large_pages,
4428                                  requested_addr);
4429   }
4430 
4431   if (! metaspace_rs.is_reserved()) {
4432     // Aarch64: Try to align metaspace so that we can decode a compressed
4433     // klass with a single MOVK instruction.  We can do this iff the
4434     // compressed class base is a multiple of 4G.
4435     // Aix: Search for a place where we can find memory. If we need to load
4436     // the base, 4G alignment is helpful, too.
4437     size_t increment = AARCH64_ONLY(4*)G;
4438     for (char *a = align_up(requested_addr, increment);
4439          a < (char*)(1024*G);
4440          a += increment) {
4441       if (a == (char *)(32*G)) {
4442         // Go faster from here on. Zero-based is no longer possible.
4443         increment = 4*G;
4444       }
4445 
4446 #if INCLUDE_CDS
4447       if (UseSharedSpaces
4448           && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
4449         // We failed to find an aligned base that will reach.  Fall
4450         // back to using our requested addr.
4451         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4452                                      _reserve_alignment,
4453                                      large_pages,
4454                                      requested_addr);
4455         break;
4456       }
4457 #endif
4458 
4459       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4460                                    _reserve_alignment,
4461                                    large_pages,
4462                                    a);
4463       if (metaspace_rs.is_reserved())
4464         break;
4465     }
4466   }
4467 
4468 #endif // AARCH64 || AIX
4469 
4470   if (!metaspace_rs.is_reserved()) {
4471 #if INCLUDE_CDS
4472     if (UseSharedSpaces) {
4473       size_t increment = align_up(1*G, _reserve_alignment);
4474 
4475       // Keep trying to allocate the metaspace, increasing the requested_addr
4476       // by 1GB each time, until we reach an address that will no longer allow
4477       // use of CDS with compressed klass pointers.
4478       char *addr = requested_addr;
4479       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
4480              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
4481         addr = addr + increment;
4482         metaspace_rs = ReservedSpace(compressed_class_space_size(),
4483                                      _reserve_alignment, large_pages, addr);
4484       }
4485     }
4486 #endif
4487     // If no successful allocation then try to allocate the space anywhere.  If
4488     // that fails then OOM doom.  At this point we cannot try allocating the
4489     // metaspace as if UseCompressedClassPointers is off because too much
4490     // initialization has happened that depends on UseCompressedClassPointers.
4491     // So, UseCompressedClassPointers cannot be turned off at this point.
4492     if (!metaspace_rs.is_reserved()) {
4493       metaspace_rs = ReservedSpace(compressed_class_space_size(),
4494                                    _reserve_alignment, large_pages);
4495       if (!metaspace_rs.is_reserved()) {
4496         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
4497                                               compressed_class_space_size()));
4498       }
4499     }
4500   }
4501 
4502   // If we got here then the metaspace got allocated.
4503   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
4504 
4505 #if INCLUDE_CDS
4506   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
4507   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
4508     FileMapInfo::stop_sharing_and_unmap(
4509         "Could not allocate metaspace at a compatible address");
4510   }
4511 #endif
4512   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
4513                                   UseSharedSpaces ? (address)cds_base : 0);
4514 
4515   initialize_class_space(metaspace_rs);
4516 
4517   LogTarget(Trace, gc, metaspace) lt;
4518   if (lt.is_enabled()) {
4519     ResourceMark rm;
4520     LogStream ls(lt);
4521     print_compressed_class_space(&ls, requested_addr);
4522   }
4523 }
4524 
4525 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
4526   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
4527                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4528   if (_class_space_list != NULL) {
4529     address base = (address)_class_space_list->current_virtual_space()->bottom();
4530     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4531                  compressed_class_space_size(), p2i(base));
4532     if (requested_addr != 0) {
4533       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4534     }
4535     st->cr();
4536   }
4537 }
4538 
4539 // For UseCompressedClassPointers the class space is reserved above the top of
4540 // the Java heap.  The argument passed in is at the base of the compressed space.
4541 void Metaspace::initialize_class_space(ReservedSpace rs) {
4542   // The reserved space size may be bigger because of alignment, esp with UseLargePages
4543   assert(rs.size() >= CompressedClassSpaceSize,
4544          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4545   assert(using_class_space(), "Must be using class space");
4546   _class_space_list = new VirtualSpaceList(rs);
4547   _chunk_manager_class = new ChunkManager(true/*is_class*/);
4548 
4549   if (!_class_space_list->initialization_succeeded()) {
4550     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4551   }
4552 }
4553 
4554 #endif
4555 
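// Ergonomic initialization: derive the metaspace commit and reserve
// alignments from the (possibly large) page size, and align the
// user-visible metaspace sizing flags accordingly.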
4556 void Metaspace::ergo_initialize() {
4557   if (DumpSharedSpaces) {
4558     // Using large pages when dumping the shared archive is currently not implemented.
4559     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4560   }
4561 
4562   size_t page_size = os::vm_page_size();
4563   if (UseLargePages && UseLargePagesInMetaspace) {
4564     page_size = os::large_page_size();
4565   }
4566 
4567   _commit_alignment  = page_size;
4568   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
4569 
4570   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
4571   // overwrite the record of whether MaxMetaspaceSize was set on the command line.
4572   // This information is needed later to conform to the specification of the
4573   // java.lang.management.MemoryUsage API.
4574   //
4575   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
4576   // globals.hpp to the aligned value, but this is not possible, since the
4577   // alignment depends on other flags being parsed.
4578   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
4579 
4580   if (MetaspaceSize > MaxMetaspaceSize) {
4581     MetaspaceSize = MaxMetaspaceSize;
4582   }
4583 
4584   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
4585 
4586   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
4587 
4588   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
4589   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
4590 
4591   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
4592 
4593   // Initial virtual space size will be calculated at global_initialize()
4594   size_t min_metaspace_sz =
4595       VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
4596   if (UseCompressedClassPointers) {
4597     if ((min_metaspace_sz + CompressedClassSpaceSize) >  MaxMetaspaceSize) {
4598       if (min_metaspace_sz >= MaxMetaspaceSize) {
4599         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
4600       } else {
4601         FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
4602                       MaxMetaspaceSize - min_metaspace_sz);
4603       }
4604     }
4605   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
4606     FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
4607                   min_metaspace_sz);
4608   }
4609 
4610   set_compressed_class_space_size(CompressedClassSpaceSize);
4611 }
4612 
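// One-time global initialization: set up the CDS dump-time or runtime
// metaspaces if requested, otherwise reserve the compressed class space
// (if used), then create the non-class virtual space list and chunk manager.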
4613 void Metaspace::global_initialize() {
4614   MetaspaceGC::initialize();
4615 
4616 #if INCLUDE_CDS
4617   if (DumpSharedSpaces) {
4618     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
4619   } else if (UseSharedSpaces) {
4620     // If any of the archived space fails to map, UseSharedSpaces
4621     // is reset to false. Fall through to the
4622     // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
4623     // metaspace.
4624     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
4625   }
4626 
4627   if (!DumpSharedSpaces && !UseSharedSpaces)
4628 #endif // INCLUDE_CDS
4629   {
4630 #ifdef _LP64
4631     if (using_class_space()) {
4632       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
4633       allocate_metaspace_compressed_klass_ptrs(base, 0);
4634     }
4635 #endif // _LP64
4636   }
4637 
4638   // Initialize these before initializing the VirtualSpaceList
4639   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4640   _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4641   // Make the first class chunk bigger than a medium chunk so it's not put
4642   // on the medium chunk list.   The next chunk will be small and progress
4643   // from there.  This size was calculated by running -version.
4644   _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4645                                      (CompressedClassSpaceSize/BytesPerWord)*2);
4646   _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4647   // Arbitrarily set the initial virtual space to a multiple
4648   // of the boot class loader size.
4649   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4650   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4651 
4652   // Initialize the list of virtual spaces.
4653   _space_list = new VirtualSpaceList(word_size);
4654   _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4655 
4656   if (!_space_list->initialization_succeeded()) {
4657     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4658   }
4659 
4660   _tracer = new MetaspaceTracer();
4661 }
4662 
4663 void Metaspace::post_initialize() {
4664   MetaspaceGC::post_initialize();
4665 }
4666 
4667 void Metaspace::verify_global_initialization() {
4668   assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
4669   assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
4670 
4671   if (using_class_space()) {
4672     assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
4673     assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
4674   }
4675 }
4676 
4677 size_t Metaspace::align_word_size_up(size_t word_size) {
4678   size_t byte_size = word_size * wordSize;
4679   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
4680 }
4681 
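// Allocate metadata of the requested word size on behalf of loader_data.
// If the initial attempt fails, a GC may be triggered (once bootstrapping
// has completed) and the allocation retried; a persistent failure is
// reported as a metaspace out-of-memory condition.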
4682 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
4683                               MetaspaceObj::Type type, TRAPS) {
4684   assert(!_frozen, "sanity");
4685   if (HAS_PENDING_EXCEPTION) {
4686     assert(false, "Should not allocate with exception pending");
4687     return NULL;  // caller does a CHECK_NULL too
4688   }
4689 
4690   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
4691         "ClassLoaderData::the_null_class_loader_data() should have been used.");
4692 
4693   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4694 
4695   // Try to allocate metadata.
4696   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4697 
4698   if (result == NULL) {
4699     if (DumpSharedSpaces && THREAD->is_VM_thread()) {
4700       tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
4701           MetaspaceObj::type_name(type), word_size * BytesPerWord);
4702       vm_exit(1);
4703     }
4704 
4705     tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4706 
4707     // Allocation failed.
4708     if (is_init_completed()) {
4709       // Only start a GC if the bootstrapping has completed.
4710 
4711       // Try to clean out some memory and retry.
4712       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4713     }
4714   }
4715 
4716   if (result == NULL) {
4717     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4718   }
4719 
4720   // Zero initialize.
4721   Copy::fill_to_words((HeapWord*)result, word_size, 0);
4722 
4723   return result;
4724 }
4725 
4726 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4727   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4728 
4729   // If result is still null, we are out of memory.
4730   Log(gc, metaspace, freelist) log;
4731   if (log.is_info()) {
4732     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4733              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4734     ResourceMark rm;
4735     if (log.is_debug()) {
4736       if (loader_data->metaspace_or_null() != NULL) {
4737         LogStream ls(log.debug());
4738         loader_data->print_value_on(&ls);
4739       }
4740     }
4741     LogStream ls(log.info());
4742     MetaspaceUtils::dump(&ls);
4743     MetaspaceUtils::print_metaspace_map(&ls, mdtype);
4744     ChunkManager::print_all_chunkmanagers(&ls);
4745   }
4746 
4747   bool out_of_compressed_class_space = false;
4748   if (is_class_space_allocation(mdtype)) {
4749     ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
4750     out_of_compressed_class_space =
4751       MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
4752       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
4753       CompressedClassSpaceSize;
4754   }
4755 
4756   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
4757   const char* space_string = out_of_compressed_class_space ?
4758     "Compressed class space" : "Metaspace";
4759 
4760   report_java_out_of_memory(space_string);
4761 
4762   if (JvmtiExport::should_post_resource_exhausted()) {
4763     JvmtiExport::post_resource_exhausted(
4764         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
4765         space_string);
4766   }
4767 
4768   if (!is_init_completed()) {
4769     vm_exit_during_initialization("OutOfMemoryError", space_string);
4770   }
4771 
4772   if (out_of_compressed_class_space) {
4773     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
4774   } else {
4775     THROW_OOP(Universe::out_of_memory_error_metaspace());
4776   }
4777 }
4778 
4779 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
4780   switch (mdtype) {
4781     case Metaspace::ClassType: return "Class";
4782     case Metaspace::NonClassType: return "Metadata";
4783     default:
4784       assert(false, "Got bad mdtype: %d", (int) mdtype);
4785       return NULL;
4786   }
4787 }
4788 
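// Release empty virtual space nodes of the given metadata type back to the
// OS. Expects the expand lock to be held (see purge() below).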
4789 void Metaspace::purge(MetadataType mdtype) {
4790   get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
4791 }
4792 
4793 void Metaspace::purge() {
4794   MutexLockerEx cl(SpaceManager::expand_lock(),
4795                    Mutex::_no_safepoint_check_flag);
4796   purge(NonClassType);
4797   if (using_class_space()) {
4798     purge(ClassType);
4799   }
4800 }
4801 
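// Returns true if ptr points into the shared (CDS) metaspace or into any
// metaspace virtual space.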
4802 bool Metaspace::contains(const void* ptr) {
4803   if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
4804     return true;
4805   }
4806   return contains_non_shared(ptr);
4807 }
4808 
4809 bool Metaspace::contains_non_shared(const void* ptr) {
4810   if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
4811      return true;
4812   }
4813 
4814   return get_space_list(NonClassType)->contains(ptr);
4815 }
4816 
4817 // ClassLoaderMetaspace
4818 
4819 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
4820   initialize(lock, type);
4821 }
4822 
4823 ClassLoaderMetaspace::~ClassLoaderMetaspace() {
4824   delete _vsm;
4825   if (Metaspace::using_class_space()) {
4826     delete _class_vsm;
4827   }
4828 }
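
// Fetch an initial chunk for the given metadata type and hand it to the
// corresponding space manager.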
4829 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4830   Metachunk* chunk = get_initialization_chunk(type, mdtype);
4831   if (chunk != NULL) {
4832     // Add to this manager's list of chunks in use and current_chunk().
4833     get_space_manager(mdtype)->add_chunk(chunk, true);
4834   }
4835 }
4836 
4837 Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
4838   size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
4839 
4840   // Get a chunk from the chunk freelist
4841   Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
4842 
4843   if (chunk == NULL) {
4844     chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
4845                                                   get_space_manager(mdtype)->medium_chunk_bunch());
4846   }
4847 
4848   return chunk;
4849 }
4850 
4851 void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
4852   Metaspace::verify_global_initialization();
4853 
4854   // Allocate SpaceManager for metadata objects.
4855   _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
4856 
4857   if (Metaspace::using_class_space()) {
4858     // Allocate SpaceManager for classes.
4859     _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
4860   }
4861 
4862   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4863 
4864   // Allocate chunk for metadata objects
4865   initialize_first_chunk(type, Metaspace::NonClassType);
4866 
4867   // Allocate chunk for class metadata objects
4868   if (Metaspace::using_class_space()) {
4869     initialize_first_chunk(type, Metaspace::ClassType);
4870   }
4871 }
4872 
4873 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4874   Metaspace::assert_not_frozen();
4875   // Don't use class_vsm() unless UseCompressedClassPointers is true.
4876   if (Metaspace::is_class_space_allocation(mdtype)) {
4877     return  class_vsm()->allocate(word_size);
4878   } else {
4879     return  vsm()->allocate(word_size);
4880   }
4881 }
4882 
4883 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
4884   Metaspace::assert_not_frozen();
4885   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
4886   assert(delta_bytes > 0, "Must be");
4887 
4888   size_t before = 0;
4889   size_t after = 0;
4890   MetaWord* res;
4891   bool incremented;
4892 
4893   // Each thread increments the HWM at most once. Even if the thread fails to increment
4894   // the HWM, an allocation is still attempted. This is because another thread must then
4895   // have incremented the HWM and therefore the allocation might still succeed.
4896   do {
4897     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
4898     res = allocate(word_size, mdtype);
4899   } while (!incremented && res == NULL);
4900 
4901   if (incremented) {
4902     Metaspace::tracer()->report_gc_threshold(before, after,
4903                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
4904     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
4905   }
4906 
4907   return res;
4908 }
4909 
4910 size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
4911   if (mdtype == Metaspace::ClassType) {
4912     return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
4913   } else {
4914     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
4915   }
4916 }
4917 
4918 size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
4919   Metaspace::assert_not_frozen();
4920   if (mdtype == Metaspace::ClassType) {
4921     return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
4922   } else {
4923     return vsm()->sum_free_in_chunks_in_use();
4924   }
4925 }
4926 
4927 // Space capacity in the Metaspace.  It includes
4928 // space in the list of chunks from which allocations
4929 // have been made. It does not include space in the global freelist, nor
4930 // the space available in the block dictionary, which
4931 // is already counted in some chunk.
4932 size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
4933   if (mdtype == Metaspace::ClassType) {
4934     return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
4935   } else {
4936     return vsm()->sum_capacity_in_chunks_in_use();
4937   }
4938 }
4939 
4940 size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
4941   return used_words_slow(mdtype) * BytesPerWord;
4942 }
4943 
4944 size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
4945   return capacity_words_slow(mdtype) * BytesPerWord;
4946 }
4947 
4948 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
4949   return vsm()->allocated_blocks_bytes() +
4950       (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
4951 }
4952 
4953 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
4954   return vsm()->allocated_chunks_bytes() +
4955       (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
4956 }
4957 
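// Return a previously allocated metadata block to the owning space manager
// so that it can be reused by later allocations from this class loader.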
4958 void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
4959   Metaspace::assert_not_frozen();
4960   assert(!SafepointSynchronize::is_at_safepoint()
4961          || Thread::current()->is_VM_thread(), "should be the VM thread");
4962 
4963   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
4964 
4965   if (is_class && Metaspace::using_class_space()) {
4966     class_vsm()->deallocate(ptr, word_size);
4967   } else {
4968     vsm()->deallocate(ptr, word_size);
4969   }
4970 }
4971 
4972 size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
4973   assert(Metaspace::using_class_space(), "Has to use class space");
4974   return class_vsm()->calc_chunk_size(word_size);
4975 }
4976 
4977 void ClassLoaderMetaspace::print_on(outputStream* out) const {
4978   // Print both class virtual space counts and metaspace.
4979   if (Verbose) {
4980     vsm()->print_on(out);
4981     if (Metaspace::using_class_space()) {
4982       class_vsm()->print_on(out);
4983     }
4984   }
4985 }
4986 
4987 void ClassLoaderMetaspace::verify() {
4988   vsm()->verify();
4989   if (Metaspace::using_class_space()) {
4990     class_vsm()->verify();
4991   }
4992 }
4993 
4994 void ClassLoaderMetaspace::dump(outputStream* const out) const {
4995   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4996   vsm()->dump(out);
4997   if (Metaspace::using_class_space()) {
4998     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4999     class_vsm()->dump(out);
5000   }
5001 }
5002 
5003 
5004 
5005 #ifdef ASSERT
5006 static void do_verify_chunk(Metachunk* chunk) {
5007   guarantee(chunk != NULL, "Sanity");
5008   // Verify chunk itself; then verify that it is consistent with the
5009   // occupancy map of its containing node.
5010   chunk->verify();
5011   VirtualSpaceNode* const vsn = chunk->container();
5012   OccupancyMap* const ocmap = vsn->occupancy_map();
5013   ocmap->verify_for_chunk(chunk);
5014 }
5015 #endif
5016 
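// Mark the chunk as in use or free, both in the chunk itself and in the
// occupancy map of its containing virtual space node.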
5017 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
5018   chunk->set_is_tagged_free(!inuse);
5019   OccupancyMap* const ocmap = chunk->container()->occupancy_map();
5020   ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
5021 }
5022 
5023 /////////////// Unit tests ///////////////
5024 
5025 #ifndef PRODUCT
5026 
5027 class TestMetaspaceUtilsTest : AllStatic {
5028  public:
5029   static void test_reserved() {
5030     size_t reserved = MetaspaceUtils::reserved_bytes();
5031 
5032     assert(reserved > 0, "assert");
5033 
5034     size_t committed  = MetaspaceUtils::committed_bytes();
5035     assert(committed <= reserved, "assert");
5036 
5037     size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
5038     assert(reserved_metadata > 0, "assert");
5039     assert(reserved_metadata <= reserved, "assert");
5040 
5041     if (UseCompressedClassPointers) {
5042       size_t reserved_class    = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
5043       assert(reserved_class > 0, "assert");
5044       assert(reserved_class < reserved, "assert");
5045     }
5046   }
5047 
5048   static void test_committed() {
5049     size_t committed = MetaspaceUtils::committed_bytes();
5050 
5051     assert(committed > 0, "assert");
5052 
5053     size_t reserved  = MetaspaceUtils::reserved_bytes();
5054     assert(committed <= reserved, "assert");
5055 
5056     size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
5057     assert(committed_metadata > 0, "assert");
5058     assert(committed_metadata <= committed, "assert");
5059 
5060     if (UseCompressedClassPointers) {
5061       size_t committed_class    = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
5062       assert(committed_class > 0, "assert");
5063       assert(committed_class < committed, "assert");
5064     }
5065   }
5066 
5067   static void test_virtual_space_list_large_chunk() {
5068     VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
5069     MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5070     // A size larger than VirtualSpaceSize (256k), plus one page to make it
5071     // _not_ be vm_allocation_granularity aligned on Windows.
5072     size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
5073     large_size += (os::vm_page_size()/BytesPerWord);
5074     vs_list->get_new_chunk(large_size, 0);
5075   }
5076 
5077   static void test() {
5078     test_reserved();
5079     test_committed();
5080     test_virtual_space_list_large_chunk();
5081   }
5082 };
5083 
5084 void TestMetaspaceUtils_test() {
5085   TestMetaspaceUtilsTest::test();
5086 }
5087 
5088 class TestVirtualSpaceNodeTest {
5089   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
5090                                           size_t& num_small_chunks,
5091                                           size_t& num_specialized_chunks) {
5092     num_medium_chunks = words_left / MediumChunk;
5093     words_left = words_left % MediumChunk;
5094 
5095     num_small_chunks = words_left / SmallChunk;
5096     words_left = words_left % SmallChunk;
5097     // how many specialized chunks can we get?
5098     num_specialized_chunks = words_left / SpecializedChunk;
5099     assert(words_left % SpecializedChunk == 0, "should be nothing left");
5100   }
5101 
5102  public:
5103   static void test() {
5104     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5105     const size_t vsn_test_size_words = MediumChunk  * 4;
5106     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5107 
5108     // The chunk sizes must be multiples of each other, or this will fail
5109     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5110     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5111 
5112     { // No committed memory in VSN
5113       ChunkManager cm(false);
5114       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5115       vsn.initialize();
5116       vsn.retire(&cm);
5117       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5118     }
5119 
5120     { // All of VSN is committed, half is used by chunks
5121       ChunkManager cm(false);
5122       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5123       vsn.initialize();
5124       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5125       vsn.get_chunk_vs(MediumChunk);
5126       vsn.get_chunk_vs(MediumChunk);
5127       vsn.retire(&cm);
5128       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5129       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5130     }
5131 
5132     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5133     // This doesn't work for systems with vm_page_size >= 16K.
5134     if (page_chunks < MediumChunk) {
5135       // 4 pages of VSN is committed, some is used by chunks
5136       ChunkManager cm(false);
5137       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5138 
5139       vsn.initialize();
5140       vsn.expand_by(page_chunks, page_chunks);
5141       vsn.get_chunk_vs(SmallChunk);
5142       vsn.get_chunk_vs(SpecializedChunk);
5143       vsn.retire(&cm);
5144 
5145       // committed - used = words left to retire
5146       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5147 
5148       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5149       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5150 
5151       assert(num_medium_chunks == 0, "should not get any medium chunks");
5152       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5153       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5154     }
5155 
5156     { // Half of VSN is committed, a humongous chunk is used
5157       ChunkManager cm(false);
5158       VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5159       vsn.initialize();
5160       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
5161       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
5162       vsn.retire(&cm);
5163 
5164       const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
5165       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5166       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5167 
5168       assert(num_medium_chunks == 0, "should not get any medium chunks");
5169       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5170       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5171     }
5172 
5173   }
5174 
5175 #define assert_is_available_positive(word_size) \
5176   assert(vsn.is_available(word_size), \
5177          #word_size ": " PTR_FORMAT " bytes were not available in " \
5178          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5179          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5180 
5181 #define assert_is_available_negative(word_size) \
5182   assert(!vsn.is_available(word_size), \
5183          #word_size ": " PTR_FORMAT " bytes should not be available in " \
5184          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
5185          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
5186 
5187   static void test_is_available_positive() {
5188     // Reserve some memory.
5189     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5190     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5191 
5192     // Commit some memory.
5193     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5194     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5195     assert(expanded, "Failed to commit");
5196 
5197     // Check that is_available accepts the committed size.
5198     assert_is_available_positive(commit_word_size);
5199 
5200     // Check that is_available accepts half the committed size.
5201     size_t expand_word_size = commit_word_size / 2;
5202     assert_is_available_positive(expand_word_size);
5203   }
5204 
5205   static void test_is_available_negative() {
5206     // Reserve some memory.
5207     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5208     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5209 
5210     // Commit some memory.
5211     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5212     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5213     assert(expanded, "Failed to commit");
5214 
5215     // Check that is_available doesn't accept a too large size.
5216     size_t two_times_commit_word_size = commit_word_size * 2;
5217     assert_is_available_negative(two_times_commit_word_size);
5218   }
5219 
5220   static void test_is_available_overflow() {
5221     // Reserve some memory.
5222     VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
5223     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
5224 
5225     // Commit some memory.
5226     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
5227     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
5228     assert(expanded, "Failed to commit");
5229 
5230     // Calculate a size that will overflow the virtual space size.
5231     void* virtual_space_max = (void*)(uintptr_t)-1;
5232     size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
5233     size_t overflow_size = bottom_to_max + BytesPerWord;
5234     size_t overflow_word_size = overflow_size / BytesPerWord;
5235 
5236     // Check that is_available can handle the overflow.
5237     assert_is_available_negative(overflow_word_size);
5238   }
5239 
5240   static void test_is_available() {
5241     TestVirtualSpaceNodeTest::test_is_available_positive();
5242     TestVirtualSpaceNodeTest::test_is_available_negative();
5243     TestVirtualSpaceNodeTest::test_is_available_overflow();
5244   }
5245 };
5246 
5247 // The following test is placed here instead of a gtest / unittest file
5248 // because the ChunkManager class is only available in this file.
5249 void ChunkManager_test_list_index() {
5250   ChunkManager manager(true);
5251 
5252   // Test a previous bug where a query for a humongous class metachunk
5253   // incorrectly matched the non-class medium metachunk size.
5254   {
5255     assert(MediumChunk > ClassMediumChunk, "Precondition for test");
5256 
5257     ChunkIndex index = manager.list_index(MediumChunk);
5258 
5259     assert(index == HumongousIndex,
5260            "Requested size is larger than ClassMediumChunk,"
5261            " so should return HumongousIndex. Got index: %d", (int)index);
5262   }
5263 
5264   // Check the specified sizes as well.
5265   {
5266     ChunkIndex index = manager.list_index(ClassSpecializedChunk);
5267     assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
5268   }
5269   {
5270     ChunkIndex index = manager.list_index(ClassSmallChunk);
5271     assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
5272   }
5273   {
5274     ChunkIndex index = manager.list_index(ClassMediumChunk);
5275     assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
5276   }
5277   {
5278     ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
5279     assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
5280   }
5281 }
5282 
5283 void Metadebug::test_anon_delegating_metaspace_sizing(ClassLoaderMetaspace* msp) {
5284   SpaceManager* smgr = msp->vsm();
5285   assert(smgr != NULL, "Sanity");
5286 
5287   // Anonymous and reflection delegating classloaders should use up to
5288   // _anon_and_delegating_metadata_specialize_chunk_limit small chunks before jumping to medium chunks,
5289   // to reduce memory waste.
5290   const size_t small_chunk_size_limit = SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit *
5291      SpaceManager::small_chunk_size(false);
5292  
5293   if (smgr->metaspace_type() == Metaspace::AnonymousMetaspaceType ||
5294       smgr->metaspace_type() == Metaspace::ReflectionMetaspaceType) {
5295     if (smgr->sum_used_in_chunks_in_use() <= small_chunk_size_limit) {
5296       assert(smgr->sum_capacity_in_chunks_in_use() <= small_chunk_size_limit,
5297         "Incorrect sizing for Anonymous or Reflection data metadata space");
5298     }
5299   }
5300 }
5301 
5302 void Metaspace_test_anonymous_delegating_classloader_sizing(ClassLoaderMetaspace* msp) {
5303   Metadebug::test_anon_delegating_metaspace_sizing(msp);
5304 }
5305 
5306 #endif // !PRODUCT
5307 
5308 #ifdef ASSERT
5309 
5310 // The following test is placed here instead of a gtest / unittest file
5311 // because the ChunkManager class is only available in this file.
5312 class SpaceManagerTest : AllStatic {
5313   friend void SpaceManager_test_adjust_initial_chunk_size();
5314 
5315   static void test_adjust_initial_chunk_size(bool is_class) {
5316     const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
5317     const size_t normal   = SpaceManager::small_chunk_size(is_class);
5318     const size_t medium   = SpaceManager::medium_chunk_size(is_class);
5319 
5320 #define test_adjust_initial_chunk_size(value, expected, is_class_value)          \
5321     do {                                                                         \
5322       size_t v = value;                                                          \
5323       size_t e = expected;                                                       \
5324       assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e,  \
5325              "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v);               \
5326     } while (0)
5327 
5328     // Smallest (specialized)
5329     test_adjust_initial_chunk_size(1,            smallest, is_class);
5330     test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
5331     test_adjust_initial_chunk_size(smallest,     smallest, is_class);
5332 
5333     // Small
5334     test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
5335     test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
5336     test_adjust_initial_chunk_size(normal,       normal, is_class);
5337 
5338     // Medium
5339     test_adjust_initial_chunk_size(normal + 1, medium, is_class);
5340     test_adjust_initial_chunk_size(medium - 1, medium, is_class);
5341     test_adjust_initial_chunk_size(medium,     medium, is_class);
5342 
5343     // Humongous
5344     test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
5345 
5346 #undef test_adjust_initial_chunk_size
5347   }
5348 
5349   static void test_adjust_initial_chunk_size() {
5350     test_adjust_initial_chunk_size(false);
5351     test_adjust_initial_chunk_size(true);
5352   }
5353 };
5354 
5355 void SpaceManager_test_adjust_initial_chunk_size() {
5356   SpaceManagerTest::test_adjust_initial_chunk_size();
5357 }
5358 
5359 #endif // ASSERT
5360 
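// Externally linked helpers that expose chunk manager statistics and the
// chunk geometry to test code outside of this file.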
5361 struct chunkmanager_statistics_t {
5362   int num_specialized_chunks;
5363   int num_small_chunks;
5364   int num_medium_chunks;
5365   int num_humongous_chunks;
5366 };
5367 
5368 extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
5369   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
5370   ChunkManager::ChunkManagerStatistics stat;
5371   chunk_manager->get_statistics(&stat);
5372   out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
5373   out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
5374   out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
5375   out->num_humongous_chunks = (int)stat.num_humongous_chunks;
5376 }
5377 
5378 struct chunk_geometry_t {
5379   size_t specialized_chunk_word_size;
5380   size_t small_chunk_word_size;
5381   size_t medium_chunk_word_size;
5382 };
5383 
5384 extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
5385   if (mdType == Metaspace::NonClassType) {
5386     out->specialized_chunk_word_size = SpecializedChunk;
5387     out->small_chunk_word_size = SmallChunk;
5388     out->medium_chunk_word_size = MediumChunk;
5389   } else {
5390     out->specialized_chunk_word_size = ClassSpecializedChunk;
5391     out->small_chunk_word_size = ClassSmallChunk;
5392     out->medium_chunk_word_size = ClassMediumChunk;
5393   }
5394 }
5395